diff --git a/.github/workflows/ui-e2e.yaml b/.github/workflows/ui-e2e.yaml index 8a9962b9379..47a5603cac7 100644 --- a/.github/workflows/ui-e2e.yaml +++ b/.github/workflows/ui-e2e.yaml @@ -4,7 +4,7 @@ on: branches: - main paths: - - 'ui' + - 'ui/**' - '.github/workflows/ui-e2e.yaml' jobs: diff --git a/Makefile b/Makefile index e53c7f7e1f8..c2c2b402554 100644 --- a/Makefile +++ b/Makefile @@ -246,3 +246,16 @@ generate-apiserver: ## Generate OpenAPISpec library based on ododevapispec.yaml --additional-properties=outputAsLibrary=true,onlyInterfaces=true,hideGenerationTimestamp=true && \ echo "Formatting generated files:" && go fmt ./pkg/apiserver-gen/... && \ echo "Removing pkg/apiserver-gen/api/openapi.yaml" && rm ./pkg/apiserver-gen/api/openapi.yaml + +.PHONY: generate-apifront +generate-apifront: ## Generate OpenAPISpec library based on ododevapispec.yaml inside ui/src/app + podman run --rm \ + -v ${PWD}:/local \ + docker.io/openapitools/openapi-generator-cli:v6.6.0 \ + generate \ + -i /local/ododevapispec.yaml \ + -g typescript-angular \ + -o /local/ui/src/app/api-gen + +.PHONY: generate-api +generate-api: generate-apiserver generate-apifront ## Generate code based on ododevapispec.yaml diff --git a/go.mod b/go.mod index 67f70ea15c4..90c284902f4 100644 --- a/go.mod +++ b/go.mod @@ -13,6 +13,7 @@ require ( github.com/devfile/registry-support/index/generator v0.0.0-20230322155332-33914affc83b github.com/devfile/registry-support/registry-library v0.0.0-20221201200738-19293ac0b8ab github.com/fatih/color v1.14.1 + github.com/feloy/devfile-lifecycle v0.0.0-20230703133341-1c1589018778 github.com/frapposelli/wwhrd v0.4.0 github.com/fsnotify/fsnotify v1.6.0 github.com/ghodss/yaml v1.0.0 diff --git a/go.sum b/go.sum index 6d92b513075..7d36b38cbf9 100644 --- a/go.sum +++ b/go.sum @@ -459,6 +459,10 @@ github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8Wlg github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/feloy/devfile-lifecycle v0.0.0-20230703132439-dcf25e046335 h1:fNtlKfkIi1QZcfvjHnXvP+w7kH6846jH9flvDHJgpWA= +github.com/feloy/devfile-lifecycle v0.0.0-20230703132439-dcf25e046335/go.mod h1:rBF2ebDSKA42kFdoWAETkPplCUoQgjMWRMyYjgymERg= +github.com/feloy/devfile-lifecycle v0.0.0-20230703133341-1c1589018778 h1:wLd04m6xrPZkon6qORwSvA64lFDqYYXcSZXBsVw7tHo= +github.com/feloy/devfile-lifecycle v0.0.0-20230703133341-1c1589018778/go.mod h1:rBF2ebDSKA42kFdoWAETkPplCUoQgjMWRMyYjgymERg= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= diff --git a/ododevapispec.yaml b/ododevapispec.yaml index b1f3221f11c..ab378e8e2d5 100644 --- a/ododevapispec.yaml +++ b/ododevapispec.yaml @@ -223,6 +223,947 @@ paths: example: message: "a push operation is not possible at this time. 
Please retry later" + /devstate/devfile: + put: + description: Updates the complete Devfile content + requestBody: + content: + application/json: + schema: + type: object + required: + - content + properties: + content: + type: string + responses: + '200': + description: Devfile content was successfully updated + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error updating the Devfile content + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error updating the Devfile content" + + get: + description: Get the complete Devfile content + responses: + '200': + description: Devfile content was successfully returned + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error getting the Devfile content + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error getting the Devfile content" + + delete: + description: Clear the Devfile content + responses: + '200': + description: Devfile content was successfully cleared + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error clearing the Devfile content + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error clearing the Devfile content" + + /devstate/metadata: + put: + description: Updates the metadata for the Devfile + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/Metadata' + responses: + '200': + description: metadata was successfully updated + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", 
+ description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error updating the metadata + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error updating the metadata" + + /devstate/container: + post: + description: Add a new container to the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + name: + description: Name of the container + type: string + image: + description: Container image + type: string + command: + description: Entrypoint of the container + type: array + items: { + type: string + } + args: + description: Args passed to the Container entrypoint + type: array + items: { + type: string + } + memReq: + description: Requested memory for the deployed container + type: string + memLimit: + description: Memory limit for the deployed container + type: string + cpuReq: + description: Requested CPU for the deployed container + type: string + cpuLimit: + description: CPU limit for the deployed container + type: string + responses: + '200': + description: container was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the container + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the container" + + /devstate/container/{containerName}: + delete: + description: "Delete a container from the Devfile" + parameters: + - name: containerName + in: path + description: Container name to delete + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Container has been deleted" + description: "Container has been deleted" + '500': + description: Error deleting the container + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error deleting the container" + + /devstate/image: + post: + description: Add a new image to the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + name: + description: Name of the image + type: string + imageName: + type: string + args: + type: array + items: { + type: string + } + buildContext: + type: string + rootRequired: + type: boolean + uri: + type: string + responses: + '200': + description: image was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": 
"", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the image + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the image" + + /devstate/image/{imageName}: + delete: + description: "Delete an image from the Devfile" + parameters: + - name: imageName + in: path + description: Image name to delete + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Image has been deleted" + description: "Image has been deleted" + '500': + description: Error deleting the image + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error deleting the image" + + /devstate/resource: + post: + description: Add a new Kubernetes resource to the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + name: + description: Name of the resource + type: string + inlined: + type: string + uri: + type: string + responses: + '200': + description: resource was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the resource + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the resource" + + /devstate/resource/{resourceName}: + delete: + description: "Delete a resource from the Devfile" + parameters: + - name: resourceName + in: path + description: Resource name to delete + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Resource has been deleted" + description: "Resource has been deleted" + '500': + description: Error deleting the resource + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error deleting the resource" + + /devstate/applyCommand: + post: + description: Add a new Apply Command to the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + name: + description: Name of the command + type: string + component: + type: string + responses: + '200': + description: Apply command was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + 
"language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the Apply command + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the Apply command" + + /devstate/command/{commandName}: + delete: + description: "Delete a command from the Devfile" + parameters: + - name: commandName + in: path + description: Command name to delete + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Command has been deleted" + description: "Command has been deleted" + '500': + description: Error deleting the command + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error deleting the command" + + /devstate/command/{commandName}/move: + post: + description: "Move a command" + parameters: + - name: commandName + in: path + description: Command name to move + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + properties: + fromGroup: + description: Initial group of the command + type: string + fromIndex: + description: Initial index of the command in the group + type: integer + toGroup: + description: Target group of the command + type: string + toIndex: + description: Target index of the command in the group + type: integer + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Command has been moved to group NewGroup position 4" + description: "Command has been moved" + '500': + description: Error moving the command + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error moving the command to group NewGroup position 4" + + /devstate/command/{commandName}/setDefault: + post: + description: "Set a command as default command for a group" + parameters: + - name: commandName + in: path + description: Command name to set as default + required: true + schema: + type: string + requestBody: + content: + application/json: + schema: + type: object + properties: + group: + description: group of the command + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Command has been set as default" + description: "Command has been set as default" + '500': + description: Error setting the command as default + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error setting the command as default" + + /devstate/command/{commandName}/unsetDefault: + post: + description: "Unset a command as default command for its group" + parameters: + - name: commandName + in: path + description: Command name to set as default + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Command has been unset as default" + description: "Command has been unset as default" + '500': + description: Error unsetting the command as default + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error unsetting the command as default" + + /devstate/compositeCommand: + post: + description: Add a new Composite Command to the Devfile + requestBody: + content: 
+ application/json: + schema: + type: object + properties: + name: + description: Name of the command + type: string + parallel: + type: boolean + commands: + type: array + items: { + type: string + } + responses: + '200': + description: Composite command was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the Composite command + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the Composite command" + + /devstate/execCommand: + post: + description: Add a new Exec Command to the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + name: + description: Name of the command + type: string + component: + type: string + commandLine: + type: string + workingDir: + type: string + hotReloadCapable: + type: boolean + responses: + '200': + description: Exec command was successfully added to the devfile + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error adding the Exec command + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error adding the Exec command" + + /devstate/events: + put: + description: Updates the Events for the Devfile + requestBody: + content: + application/json: + schema: + type: object + properties: + eventName: + type: string + enum: [postStart,postStop,preStart,preStop] + commands: + type: array + items: { + type: string + } + responses: + '200': + description: Events were successfully updated + content: + application/json: + schema: + $ref: '#/components/schemas/DevfileContent' + example: + { + "content": "schemaVersion: 2.2.0\n", + "commands": [], + "containers": [], + "images": [], + "resources": [], + "events": { + "preStart": null, + "postStart": null, + "preStop": null, + "postStop": null + }, + "metadata": { + "name": "", + "version": "", + "displayName": "", + description": "", + "tags": "", + "architectures": "", + "icon": "", + "globalMemoryLimit": "", + "projectType": "", + "language": "", + "website": "", + "provider": "", + "supportUrl": "" + } + } + '500': + description: Error updating the events + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Error updating the events" + + /devstate/chart: + get: + description: Get chart representing the Devfile cycle in mermaid format + responses: + '200': + 
description: Chart representing the Devfile cycle + content: + application/json: + schema: + type: object + required: + - chart + properties: + chart: + type: string + description: chart in mermaid format + + /devstate/quantityValid: + post: + description: Check if a quantity is valid + requestBody: + content: + application/json: + schema: + type: object + required: + - quantity + properties: + quantity: + type: string + responses: + '200': + description: quantity is valid + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralSuccess' + example: + message: "Quantity \"1Gi\" is valid" + '400': + description: quantity is not valid + content: + application/json: + schema: + $ref: '#/components/schemas/GeneralError' + example: + message: "Quantity \"aze\" is not valid" + components: schemas: GeneralSuccess: @@ -234,4 +1175,218 @@ components: type: object properties: message: - type: string \ No newline at end of file + type: string + DevfileContent: + type: object + required: + - content + - commands + - containers + - images + - resources + - events + - metadata + properties: + content: + type: string + commands: + type: array + items: + $ref: '#/components/schemas/Command' + containers: + type: array + items: + $ref: '#/components/schemas/Container' + images: + type: array + items: + $ref: '#/components/schemas/Image' + resources: + type: array + items: + $ref: '#/components/schemas/Resource' + events: + $ref: '#/components/schemas/Events' + metadata: + $ref: '#/components/schemas/Metadata' + Command: + type: object + required: + - name + - group + - type + properties: + name: + type: string + group: + type: string + default: + type: boolean + type: + type: string + exec: + $ref: '#/components/schemas/ExecCommand' + apply: + $ref: '#/components/schemas/ApplyCommand' + image: + $ref: '#/components/schemas/ImageCommand' + composite: + $ref: '#/components/schemas/CompositeCommand' + ExecCommand: + type: object + required: + - component + - commandLine + - workingDir + - hotReloadCapable + properties: + component: + type: string + commandLine: + type: string + workingDir: + type: string + hotReloadCapable: + type: boolean + ApplyCommand: + type: object + required: + - component + properties: + component: + type: string + ImageCommand: + type: object + required: + - component + properties: + component: + type: string + CompositeCommand: + type: object + required: + - commands + - parallel + properties: + commands: + type: array + items: + type: string + parallel: + type: boolean + Container: + type: object + required: + - name + - image + - command + - args + - memoryRequest + - memoryLimit + - cpuRequest + - cpuLimit + properties: + name: + type: string + image: + type: string + command: + type: array + items: + type: string + args: + type: array + items: + type: string + memoryRequest: + type: string + memoryLimit: + type: string + cpuRequest: + type: string + cpuLimit: + type: string + Image: + type: object + required: + - name + - imageName + - args + - buildContext + - rootRequired + - uri + properties: + name: + type: string + imageName: + type: string + args: + type: array + items: + type: string + buildContext: + type: string + rootRequired: + type: boolean + uri: + type: string + Resource: + type: object + required: + - name + properties: + name: + type: string + inlined: + type: string + uri: + type: string + Events: + type: object + properties: + preStart: + type: array + items: + type: string + postStart: + type: array + items: + type: string + preStop: + type: 
array + items: + type: string + postStop: + type: array + items: + type: string + Metadata: + type: object + properties: + name: + type: string + version: + type: string + displayName: + type: string + description: + type: string + tags: + type: string + architectures: + type: string + icon: + type: string + globalMemoryLimit: + type: string + projectType: + type: string + language: + type: string + website: + type: string + provider: + type: string + supportUrl: + type: string + diff --git a/pkg/apiserver-gen/.openapi-generator/FILES b/pkg/apiserver-gen/.openapi-generator/FILES index dcaf4f3948c..0b3c4c8a585 100644 --- a/pkg/apiserver-gen/.openapi-generator/FILES +++ b/pkg/apiserver-gen/.openapi-generator/FILES @@ -8,7 +8,30 @@ go/impl.go go/logger.go go/model__component_command_post_request.go go/model__component_get_200_response.go +go/model__devstate_apply_command_post_request.go +go/model__devstate_chart_get_200_response.go +go/model__devstate_command__command_name__move_post_request.go +go/model__devstate_command__command_name__set_default_post_request.go +go/model__devstate_composite_command_post_request.go +go/model__devstate_container_post_request.go +go/model__devstate_devfile_put_request.go +go/model__devstate_events_put_request.go +go/model__devstate_exec_command_post_request.go +go/model__devstate_image_post_request.go +go/model__devstate_quantity_valid_post_request.go +go/model__devstate_resource_post_request.go go/model__instance_get_200_response.go +go/model_apply_command.go +go/model_command.go +go/model_composite_command.go +go/model_container.go +go/model_devfile_content.go +go/model_events.go +go/model_exec_command.go go/model_general_error.go go/model_general_success.go +go/model_image.go +go/model_image_command.go +go/model_metadata.go +go/model_resource.go go/routers.go diff --git a/pkg/apiserver-gen/go/api.go b/pkg/apiserver-gen/go/api.go index c8e13b62193..5a920f366c7 100644 --- a/pkg/apiserver-gen/go/api.go +++ b/pkg/apiserver-gen/go/api.go @@ -20,6 +20,26 @@ import ( type DefaultApiRouter interface { ComponentCommandPost(http.ResponseWriter, *http.Request) ComponentGet(http.ResponseWriter, *http.Request) + DevstateApplyCommandPost(http.ResponseWriter, *http.Request) + DevstateChartGet(http.ResponseWriter, *http.Request) + DevstateCommandCommandNameDelete(http.ResponseWriter, *http.Request) + DevstateCommandCommandNameMovePost(http.ResponseWriter, *http.Request) + DevstateCommandCommandNameSetDefaultPost(http.ResponseWriter, *http.Request) + DevstateCommandCommandNameUnsetDefaultPost(http.ResponseWriter, *http.Request) + DevstateCompositeCommandPost(http.ResponseWriter, *http.Request) + DevstateContainerContainerNameDelete(http.ResponseWriter, *http.Request) + DevstateContainerPost(http.ResponseWriter, *http.Request) + DevstateDevfileDelete(http.ResponseWriter, *http.Request) + DevstateDevfileGet(http.ResponseWriter, *http.Request) + DevstateDevfilePut(http.ResponseWriter, *http.Request) + DevstateEventsPut(http.ResponseWriter, *http.Request) + DevstateExecCommandPost(http.ResponseWriter, *http.Request) + DevstateImageImageNameDelete(http.ResponseWriter, *http.Request) + DevstateImagePost(http.ResponseWriter, *http.Request) + DevstateMetadataPut(http.ResponseWriter, *http.Request) + DevstateQuantityValidPost(http.ResponseWriter, *http.Request) + DevstateResourcePost(http.ResponseWriter, *http.Request) + DevstateResourceResourceNameDelete(http.ResponseWriter, *http.Request) InstanceDelete(http.ResponseWriter, *http.Request) InstanceGet(http.ResponseWriter, 
*http.Request) } @@ -31,6 +51,26 @@ type DefaultApiRouter interface { type DefaultApiServicer interface { ComponentCommandPost(context.Context, ComponentCommandPostRequest) (ImplResponse, error) ComponentGet(context.Context) (ImplResponse, error) + DevstateApplyCommandPost(context.Context, DevstateApplyCommandPostRequest) (ImplResponse, error) + DevstateChartGet(context.Context) (ImplResponse, error) + DevstateCommandCommandNameDelete(context.Context, string) (ImplResponse, error) + DevstateCommandCommandNameMovePost(context.Context, string, DevstateCommandCommandNameMovePostRequest) (ImplResponse, error) + DevstateCommandCommandNameSetDefaultPost(context.Context, string, DevstateCommandCommandNameSetDefaultPostRequest) (ImplResponse, error) + DevstateCommandCommandNameUnsetDefaultPost(context.Context, string) (ImplResponse, error) + DevstateCompositeCommandPost(context.Context, DevstateCompositeCommandPostRequest) (ImplResponse, error) + DevstateContainerContainerNameDelete(context.Context, string) (ImplResponse, error) + DevstateContainerPost(context.Context, DevstateContainerPostRequest) (ImplResponse, error) + DevstateDevfileDelete(context.Context) (ImplResponse, error) + DevstateDevfileGet(context.Context) (ImplResponse, error) + DevstateDevfilePut(context.Context, DevstateDevfilePutRequest) (ImplResponse, error) + DevstateEventsPut(context.Context, DevstateEventsPutRequest) (ImplResponse, error) + DevstateExecCommandPost(context.Context, DevstateExecCommandPostRequest) (ImplResponse, error) + DevstateImageImageNameDelete(context.Context, string) (ImplResponse, error) + DevstateImagePost(context.Context, DevstateImagePostRequest) (ImplResponse, error) + DevstateMetadataPut(context.Context, Metadata) (ImplResponse, error) + DevstateQuantityValidPost(context.Context, DevstateQuantityValidPostRequest) (ImplResponse, error) + DevstateResourcePost(context.Context, DevstateResourcePostRequest) (ImplResponse, error) + DevstateResourceResourceNameDelete(context.Context, string) (ImplResponse, error) InstanceDelete(context.Context) (ImplResponse, error) InstanceGet(context.Context) (ImplResponse, error) } diff --git a/pkg/apiserver-gen/go/api_default.go b/pkg/apiserver-gen/go/api_default.go index 18988caae88..5eea644c344 100644 --- a/pkg/apiserver-gen/go/api_default.go +++ b/pkg/apiserver-gen/go/api_default.go @@ -13,6 +13,8 @@ import ( "encoding/json" "net/http" "strings" + + "github.com/gorilla/mux" ) // DefaultApiController binds http requests to an api service and writes the service results to the http response @@ -60,6 +62,126 @@ func (c *DefaultApiController) Routes() Routes { "/api/v1/component", c.ComponentGet, }, + { + "DevstateApplyCommandPost", + strings.ToUpper("Post"), + "/api/v1/devstate/applyCommand", + c.DevstateApplyCommandPost, + }, + { + "DevstateChartGet", + strings.ToUpper("Get"), + "/api/v1/devstate/chart", + c.DevstateChartGet, + }, + { + "DevstateCommandCommandNameDelete", + strings.ToUpper("Delete"), + "/api/v1/devstate/command/{commandName}", + c.DevstateCommandCommandNameDelete, + }, + { + "DevstateCommandCommandNameMovePost", + strings.ToUpper("Post"), + "/api/v1/devstate/command/{commandName}/move", + c.DevstateCommandCommandNameMovePost, + }, + { + "DevstateCommandCommandNameSetDefaultPost", + strings.ToUpper("Post"), + "/api/v1/devstate/command/{commandName}/setDefault", + c.DevstateCommandCommandNameSetDefaultPost, + }, + { + "DevstateCommandCommandNameUnsetDefaultPost", + strings.ToUpper("Post"), + "/api/v1/devstate/command/{commandName}/unsetDefault", + 
c.DevstateCommandCommandNameUnsetDefaultPost, + }, + { + "DevstateCompositeCommandPost", + strings.ToUpper("Post"), + "/api/v1/devstate/compositeCommand", + c.DevstateCompositeCommandPost, + }, + { + "DevstateContainerContainerNameDelete", + strings.ToUpper("Delete"), + "/api/v1/devstate/container/{containerName}", + c.DevstateContainerContainerNameDelete, + }, + { + "DevstateContainerPost", + strings.ToUpper("Post"), + "/api/v1/devstate/container", + c.DevstateContainerPost, + }, + { + "DevstateDevfileDelete", + strings.ToUpper("Delete"), + "/api/v1/devstate/devfile", + c.DevstateDevfileDelete, + }, + { + "DevstateDevfileGet", + strings.ToUpper("Get"), + "/api/v1/devstate/devfile", + c.DevstateDevfileGet, + }, + { + "DevstateDevfilePut", + strings.ToUpper("Put"), + "/api/v1/devstate/devfile", + c.DevstateDevfilePut, + }, + { + "DevstateEventsPut", + strings.ToUpper("Put"), + "/api/v1/devstate/events", + c.DevstateEventsPut, + }, + { + "DevstateExecCommandPost", + strings.ToUpper("Post"), + "/api/v1/devstate/execCommand", + c.DevstateExecCommandPost, + }, + { + "DevstateImageImageNameDelete", + strings.ToUpper("Delete"), + "/api/v1/devstate/image/{imageName}", + c.DevstateImageImageNameDelete, + }, + { + "DevstateImagePost", + strings.ToUpper("Post"), + "/api/v1/devstate/image", + c.DevstateImagePost, + }, + { + "DevstateMetadataPut", + strings.ToUpper("Put"), + "/api/v1/devstate/metadata", + c.DevstateMetadataPut, + }, + { + "DevstateQuantityValidPost", + strings.ToUpper("Post"), + "/api/v1/devstate/quantityValid", + c.DevstateQuantityValidPost, + }, + { + "DevstateResourcePost", + strings.ToUpper("Post"), + "/api/v1/devstate/resource", + c.DevstateResourcePost, + }, + { + "DevstateResourceResourceNameDelete", + strings.ToUpper("Delete"), + "/api/v1/devstate/resource/{resourceName}", + c.DevstateResourceResourceNameDelete, + }, { "InstanceDelete", strings.ToUpper("Delete"), @@ -112,6 +234,412 @@ func (c *DefaultApiController) ComponentGet(w http.ResponseWriter, r *http.Reque } +// DevstateApplyCommandPost - +func (c *DefaultApiController) DevstateApplyCommandPost(w http.ResponseWriter, r *http.Request) { + devstateApplyCommandPostRequestParam := DevstateApplyCommandPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateApplyCommandPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateApplyCommandPostRequestRequired(devstateApplyCommandPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateApplyCommandPost(r.Context(), devstateApplyCommandPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateChartGet - +func (c *DefaultApiController) DevstateChartGet(w http.ResponseWriter, r *http.Request) { + result, err := c.service.DevstateChartGet(r.Context()) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateCommandCommandNameDelete - +func (c *DefaultApiController) DevstateCommandCommandNameDelete(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + commandNameParam := params["commandName"] + 
result, err := c.service.DevstateCommandCommandNameDelete(r.Context(), commandNameParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateCommandCommandNameMovePost - +func (c *DefaultApiController) DevstateCommandCommandNameMovePost(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + commandNameParam := params["commandName"] + devstateCommandCommandNameMovePostRequestParam := DevstateCommandCommandNameMovePostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateCommandCommandNameMovePostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateCommandCommandNameMovePostRequestRequired(devstateCommandCommandNameMovePostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateCommandCommandNameMovePost(r.Context(), commandNameParam, devstateCommandCommandNameMovePostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateCommandCommandNameSetDefaultPost - +func (c *DefaultApiController) DevstateCommandCommandNameSetDefaultPost(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + commandNameParam := params["commandName"] + devstateCommandCommandNameSetDefaultPostRequestParam := DevstateCommandCommandNameSetDefaultPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateCommandCommandNameSetDefaultPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateCommandCommandNameSetDefaultPostRequestRequired(devstateCommandCommandNameSetDefaultPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateCommandCommandNameSetDefaultPost(r.Context(), commandNameParam, devstateCommandCommandNameSetDefaultPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateCommandCommandNameUnsetDefaultPost - +func (c *DefaultApiController) DevstateCommandCommandNameUnsetDefaultPost(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + commandNameParam := params["commandName"] + result, err := c.service.DevstateCommandCommandNameUnsetDefaultPost(r.Context(), commandNameParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateCompositeCommandPost - +func (c *DefaultApiController) DevstateCompositeCommandPost(w http.ResponseWriter, r *http.Request) { + devstateCompositeCommandPostRequestParam := DevstateCompositeCommandPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateCompositeCommandPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + 
return + } + if err := AssertDevstateCompositeCommandPostRequestRequired(devstateCompositeCommandPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateCompositeCommandPost(r.Context(), devstateCompositeCommandPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateContainerContainerNameDelete - +func (c *DefaultApiController) DevstateContainerContainerNameDelete(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + containerNameParam := params["containerName"] + result, err := c.service.DevstateContainerContainerNameDelete(r.Context(), containerNameParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateContainerPost - +func (c *DefaultApiController) DevstateContainerPost(w http.ResponseWriter, r *http.Request) { + devstateContainerPostRequestParam := DevstateContainerPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateContainerPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateContainerPostRequestRequired(devstateContainerPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateContainerPost(r.Context(), devstateContainerPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateDevfileDelete - +func (c *DefaultApiController) DevstateDevfileDelete(w http.ResponseWriter, r *http.Request) { + result, err := c.service.DevstateDevfileDelete(r.Context()) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateDevfileGet - +func (c *DefaultApiController) DevstateDevfileGet(w http.ResponseWriter, r *http.Request) { + result, err := c.service.DevstateDevfileGet(r.Context()) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateDevfilePut - +func (c *DefaultApiController) DevstateDevfilePut(w http.ResponseWriter, r *http.Request) { + devstateDevfilePutRequestParam := DevstateDevfilePutRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateDevfilePutRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateDevfilePutRequestRequired(devstateDevfilePutRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateDevfilePut(r.Context(), devstateDevfilePutRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, 
&result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateEventsPut - +func (c *DefaultApiController) DevstateEventsPut(w http.ResponseWriter, r *http.Request) { + devstateEventsPutRequestParam := DevstateEventsPutRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateEventsPutRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateEventsPutRequestRequired(devstateEventsPutRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateEventsPut(r.Context(), devstateEventsPutRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateExecCommandPost - +func (c *DefaultApiController) DevstateExecCommandPost(w http.ResponseWriter, r *http.Request) { + devstateExecCommandPostRequestParam := DevstateExecCommandPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateExecCommandPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateExecCommandPostRequestRequired(devstateExecCommandPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateExecCommandPost(r.Context(), devstateExecCommandPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateImageImageNameDelete - +func (c *DefaultApiController) DevstateImageImageNameDelete(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + imageNameParam := params["imageName"] + result, err := c.service.DevstateImageImageNameDelete(r.Context(), imageNameParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateImagePost - +func (c *DefaultApiController) DevstateImagePost(w http.ResponseWriter, r *http.Request) { + devstateImagePostRequestParam := DevstateImagePostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateImagePostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateImagePostRequestRequired(devstateImagePostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateImagePost(r.Context(), devstateImagePostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateMetadataPut - +func (c *DefaultApiController) DevstateMetadataPut(w http.ResponseWriter, r *http.Request) { + metadataParam := Metadata{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&metadataParam); err != nil { + c.errorHandler(w, r, 
&ParsingError{Err: err}, nil) + return + } + if err := AssertMetadataRequired(metadataParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateMetadataPut(r.Context(), metadataParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateQuantityValidPost - +func (c *DefaultApiController) DevstateQuantityValidPost(w http.ResponseWriter, r *http.Request) { + devstateQuantityValidPostRequestParam := DevstateQuantityValidPostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateQuantityValidPostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateQuantityValidPostRequestRequired(devstateQuantityValidPostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateQuantityValidPost(r.Context(), devstateQuantityValidPostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateResourcePost - +func (c *DefaultApiController) DevstateResourcePost(w http.ResponseWriter, r *http.Request) { + devstateResourcePostRequestParam := DevstateResourcePostRequest{} + d := json.NewDecoder(r.Body) + d.DisallowUnknownFields() + if err := d.Decode(&devstateResourcePostRequestParam); err != nil { + c.errorHandler(w, r, &ParsingError{Err: err}, nil) + return + } + if err := AssertDevstateResourcePostRequestRequired(devstateResourcePostRequestParam); err != nil { + c.errorHandler(w, r, err, nil) + return + } + result, err := c.service.DevstateResourcePost(r.Context(), devstateResourcePostRequestParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + +// DevstateResourceResourceNameDelete - +func (c *DefaultApiController) DevstateResourceResourceNameDelete(w http.ResponseWriter, r *http.Request) { + params := mux.Vars(r) + resourceNameParam := params["resourceName"] + result, err := c.service.DevstateResourceResourceNameDelete(r.Context(), resourceNameParam) + // If an error occurred, encode the error with the status code + if err != nil { + c.errorHandler(w, r, err, &result) + return + } + // If no error, encode the body and the result code + EncodeJSONResponse(result.Body, &result.Code, w) + +} + // InstanceDelete - func (c *DefaultApiController) InstanceDelete(w http.ResponseWriter, r *http.Request) { result, err := c.service.InstanceDelete(r.Context()) diff --git a/pkg/apiserver-gen/go/model__devstate_apply_command_post_request.go b/pkg/apiserver-gen/go/model__devstate_apply_command_post_request.go new file mode 100644 index 00000000000..cdcaad32564 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_apply_command_post_request.go @@ -0,0 +1,35 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateApplyCommandPostRequest struct { + + // Name of the command + Name string 
`json:"name,omitempty"` + + Component string `json:"component,omitempty"` +} + +// AssertDevstateApplyCommandPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateApplyCommandPostRequestRequired(obj DevstateApplyCommandPostRequest) error { + return nil +} + +// AssertRecurseDevstateApplyCommandPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateApplyCommandPostRequest (e.g. [][]DevstateApplyCommandPostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateApplyCommandPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateApplyCommandPostRequest, ok := obj.(DevstateApplyCommandPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateApplyCommandPostRequestRequired(aDevstateApplyCommandPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_chart_get_200_response.go b/pkg/apiserver-gen/go/model__devstate_chart_get_200_response.go new file mode 100644 index 00000000000..68de784a615 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_chart_get_200_response.go @@ -0,0 +1,42 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateChartGet200Response struct { + + // chart in mermaid format + Chart string `json:"chart"` +} + +// AssertDevstateChartGet200ResponseRequired checks if the required fields are not zero-ed +func AssertDevstateChartGet200ResponseRequired(obj DevstateChartGet200Response) error { + elements := map[string]interface{}{ + "chart": obj.Chart, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseDevstateChartGet200ResponseRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateChartGet200Response (e.g. [][]DevstateChartGet200Response), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseDevstateChartGet200ResponseRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateChartGet200Response, ok := obj.(DevstateChartGet200Response) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateChartGet200ResponseRequired(aDevstateChartGet200Response) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_command__command_name__move_post_request.go b/pkg/apiserver-gen/go/model__devstate_command__command_name__move_post_request.go new file mode 100644 index 00000000000..3f42103ff6e --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_command__command_name__move_post_request.go @@ -0,0 +1,42 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateCommandCommandNameMovePostRequest struct { + + // Initial group of the command + FromGroup string `json:"fromGroup,omitempty"` + + // Initial index of the command in the group + FromIndex int32 `json:"fromIndex,omitempty"` + + // Target group of the command + ToGroup string `json:"toGroup,omitempty"` + + // Target index of the command in the group + ToIndex int32 `json:"toIndex,omitempty"` +} + +// AssertDevstateCommandCommandNameMovePostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateCommandCommandNameMovePostRequestRequired(obj DevstateCommandCommandNameMovePostRequest) error { + return nil +} + +// AssertRecurseDevstateCommandCommandNameMovePostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateCommandCommandNameMovePostRequest (e.g. [][]DevstateCommandCommandNameMovePostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateCommandCommandNameMovePostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateCommandCommandNameMovePostRequest, ok := obj.(DevstateCommandCommandNameMovePostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateCommandCommandNameMovePostRequestRequired(aDevstateCommandCommandNameMovePostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_command__command_name__set_default_post_request.go b/pkg/apiserver-gen/go/model__devstate_command__command_name__set_default_post_request.go new file mode 100644 index 00000000000..4fe994d3442 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_command__command_name__set_default_post_request.go @@ -0,0 +1,33 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateCommandCommandNameSetDefaultPostRequest struct { + + // group of the command + Group string `json:"group,omitempty"` +} + +// AssertDevstateCommandCommandNameSetDefaultPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateCommandCommandNameSetDefaultPostRequestRequired(obj DevstateCommandCommandNameSetDefaultPostRequest) error { + return nil +} + +// AssertRecurseDevstateCommandCommandNameSetDefaultPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateCommandCommandNameSetDefaultPostRequest (e.g. 
[][]DevstateCommandCommandNameSetDefaultPostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateCommandCommandNameSetDefaultPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateCommandCommandNameSetDefaultPostRequest, ok := obj.(DevstateCommandCommandNameSetDefaultPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateCommandCommandNameSetDefaultPostRequestRequired(aDevstateCommandCommandNameSetDefaultPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_composite_command_post_request.go b/pkg/apiserver-gen/go/model__devstate_composite_command_post_request.go new file mode 100644 index 00000000000..aff84ca08b1 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_composite_command_post_request.go @@ -0,0 +1,37 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateCompositeCommandPostRequest struct { + + // Name of the command + Name string `json:"name,omitempty"` + + Parallel bool `json:"parallel,omitempty"` + + Commands []string `json:"commands,omitempty"` +} + +// AssertDevstateCompositeCommandPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateCompositeCommandPostRequestRequired(obj DevstateCompositeCommandPostRequest) error { + return nil +} + +// AssertRecurseDevstateCompositeCommandPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateCompositeCommandPostRequest (e.g. [][]DevstateCompositeCommandPostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateCompositeCommandPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateCompositeCommandPostRequest, ok := obj.(DevstateCompositeCommandPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateCompositeCommandPostRequestRequired(aDevstateCompositeCommandPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_container_post_200_response.go b/pkg/apiserver-gen/go/model__devstate_container_post_200_response.go new file mode 100644 index 00000000000..68941382966 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_container_post_200_response.go @@ -0,0 +1,33 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateContainerPost200Response struct { + + // Content of the Devfile + Component map[string]interface{} `json:"component,omitempty"` +} + +// AssertDevstateContainerPost200ResponseRequired checks if the required fields are not zero-ed +func AssertDevstateContainerPost200ResponseRequired(obj DevstateContainerPost200Response) error { + return nil +} + +// AssertRecurseDevstateContainerPost200ResponseRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateContainerPost200Response (e.g. [][]DevstateContainerPost200Response), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseDevstateContainerPost200ResponseRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateContainerPost200Response, ok := obj.(DevstateContainerPost200Response) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateContainerPost200ResponseRequired(aDevstateContainerPost200Response) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_container_post_request.go b/pkg/apiserver-gen/go/model__devstate_container_post_request.go new file mode 100644 index 00000000000..e6ea6f75cf2 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_container_post_request.go @@ -0,0 +1,54 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateContainerPostRequest struct { + + // Name of the container + Name string `json:"name,omitempty"` + + // Container image + Image string `json:"image,omitempty"` + + // Entrypoint of the container + Command []string `json:"command,omitempty"` + + // Args passed to the Container entrypoint + Args []string `json:"args,omitempty"` + + // Requested memory for the deployed container + MemReq string `json:"memReq,omitempty"` + + // Memory limit for the deployed container + MemLimit string `json:"memLimit,omitempty"` + + // Requested CPU for the deployed container + CpuReq string `json:"cpuReq,omitempty"` + + // CPU limit for the deployed container + CpuLimit string `json:"cpuLimit,omitempty"` +} + +// AssertDevstateContainerPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateContainerPostRequestRequired(obj DevstateContainerPostRequest) error { + return nil +} + +// AssertRecurseDevstateContainerPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateContainerPostRequest (e.g. [][]DevstateContainerPostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateContainerPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateContainerPostRequest, ok := obj.(DevstateContainerPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateContainerPostRequestRequired(aDevstateContainerPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_devfile_put_request.go b/pkg/apiserver-gen/go/model__devstate_devfile_put_request.go new file mode 100644 index 00000000000..6ed2c52645c --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_devfile_put_request.go @@ -0,0 +1,40 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateDevfilePutRequest struct { + Content string `json:"content"` +} + +// AssertDevstateDevfilePutRequestRequired checks if the required fields are not zero-ed +func AssertDevstateDevfilePutRequestRequired(obj DevstateDevfilePutRequest) error { + elements := map[string]interface{}{ + "content": obj.Content, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseDevstateDevfilePutRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateDevfilePutRequest (e.g. 
[][]DevstateDevfilePutRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateDevfilePutRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateDevfilePutRequest, ok := obj.(DevstateDevfilePutRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateDevfilePutRequestRequired(aDevstateDevfilePutRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_events_put_request.go b/pkg/apiserver-gen/go/model__devstate_events_put_request.go new file mode 100644 index 00000000000..236e2a00621 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_events_put_request.go @@ -0,0 +1,33 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateEventsPutRequest struct { + EventName string `json:"eventName,omitempty"` + + Commands []string `json:"commands,omitempty"` +} + +// AssertDevstateEventsPutRequestRequired checks if the required fields are not zero-ed +func AssertDevstateEventsPutRequestRequired(obj DevstateEventsPutRequest) error { + return nil +} + +// AssertRecurseDevstateEventsPutRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateEventsPutRequest (e.g. [][]DevstateEventsPutRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateEventsPutRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateEventsPutRequest, ok := obj.(DevstateEventsPutRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateEventsPutRequestRequired(aDevstateEventsPutRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_exec_command_post_request.go b/pkg/apiserver-gen/go/model__devstate_exec_command_post_request.go new file mode 100644 index 00000000000..77c8fa322d3 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_exec_command_post_request.go @@ -0,0 +1,41 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateExecCommandPostRequest struct { + + // Name of the command + Name string `json:"name,omitempty"` + + Component string `json:"component,omitempty"` + + CommandLine string `json:"commandLine,omitempty"` + + WorkingDir string `json:"workingDir,omitempty"` + + HotReloadCapable bool `json:"hotReloadCapable,omitempty"` +} + +// AssertDevstateExecCommandPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateExecCommandPostRequestRequired(obj DevstateExecCommandPostRequest) error { + return nil +} + +// AssertRecurseDevstateExecCommandPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateExecCommandPostRequest (e.g. [][]DevstateExecCommandPostRequest), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseDevstateExecCommandPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateExecCommandPostRequest, ok := obj.(DevstateExecCommandPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateExecCommandPostRequestRequired(aDevstateExecCommandPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_image_post_request.go b/pkg/apiserver-gen/go/model__devstate_image_post_request.go new file mode 100644 index 00000000000..2284d970be6 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_image_post_request.go @@ -0,0 +1,43 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateImagePostRequest struct { + + // Name of the image + Name string `json:"name,omitempty"` + + ImageName string `json:"imageName,omitempty"` + + Args []string `json:"args,omitempty"` + + BuildContext string `json:"buildContext,omitempty"` + + RootRequired bool `json:"rootRequired,omitempty"` + + Uri string `json:"uri,omitempty"` +} + +// AssertDevstateImagePostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateImagePostRequestRequired(obj DevstateImagePostRequest) error { + return nil +} + +// AssertRecurseDevstateImagePostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateImagePostRequest (e.g. [][]DevstateImagePostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateImagePostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateImagePostRequest, ok := obj.(DevstateImagePostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateImagePostRequestRequired(aDevstateImagePostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_metadata_put_200_response.go b/pkg/apiserver-gen/go/model__devstate_metadata_put_200_response.go new file mode 100644 index 00000000000..64e9b8345e4 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_metadata_put_200_response.go @@ -0,0 +1,33 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateMetadataPut200Response struct { + + // Content of the Devfile + Component map[string]interface{} `json:"component,omitempty"` +} + +// AssertDevstateMetadataPut200ResponseRequired checks if the required fields are not zero-ed +func AssertDevstateMetadataPut200ResponseRequired(obj DevstateMetadataPut200Response) error { + return nil +} + +// AssertRecurseDevstateMetadataPut200ResponseRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateMetadataPut200Response (e.g. [][]DevstateMetadataPut200Response), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseDevstateMetadataPut200ResponseRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateMetadataPut200Response, ok := obj.(DevstateMetadataPut200Response) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateMetadataPut200ResponseRequired(aDevstateMetadataPut200Response) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_metadata_put_request.go b/pkg/apiserver-gen/go/model__devstate_metadata_put_request.go new file mode 100644 index 00000000000..ac879723023 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_metadata_put_request.go @@ -0,0 +1,55 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateMetadataPutRequest struct { + Name string `json:"name,omitempty"` + + Version string `json:"version,omitempty"` + + DisplayName string `json:"displayName,omitempty"` + + Description string `json:"description,omitempty"` + + Tags string `json:"tags,omitempty"` + + Architectures string `json:"architectures,omitempty"` + + Icon string `json:"icon,omitempty"` + + GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"` + + ProjectType string `json:"projectType,omitempty"` + + Language string `json:"language,omitempty"` + + Website string `json:"website,omitempty"` + + Provider string `json:"provider,omitempty"` + + SupportUrl string `json:"supportUrl,omitempty"` +} + +// AssertDevstateMetadataPutRequestRequired checks if the required fields are not zero-ed +func AssertDevstateMetadataPutRequestRequired(obj DevstateMetadataPutRequest) error { + return nil +} + +// AssertRecurseDevstateMetadataPutRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateMetadataPutRequest (e.g. [][]DevstateMetadataPutRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateMetadataPutRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateMetadataPutRequest, ok := obj.(DevstateMetadataPutRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateMetadataPutRequestRequired(aDevstateMetadataPutRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_quantity_valid_post_request.go b/pkg/apiserver-gen/go/model__devstate_quantity_valid_post_request.go new file mode 100644 index 00000000000..50c51dd2d80 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_quantity_valid_post_request.go @@ -0,0 +1,40 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateQuantityValidPostRequest struct { + Quantity string `json:"quantity"` +} + +// AssertDevstateQuantityValidPostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateQuantityValidPostRequestRequired(obj DevstateQuantityValidPostRequest) error { + elements := map[string]interface{}{ + "quantity": obj.Quantity, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseDevstateQuantityValidPostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateQuantityValidPostRequest (e.g. 
[][]DevstateQuantityValidPostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateQuantityValidPostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateQuantityValidPostRequest, ok := obj.(DevstateQuantityValidPostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateQuantityValidPostRequestRequired(aDevstateQuantityValidPostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model__devstate_resource_post_request.go b/pkg/apiserver-gen/go/model__devstate_resource_post_request.go new file mode 100644 index 00000000000..9b9a97cfe73 --- /dev/null +++ b/pkg/apiserver-gen/go/model__devstate_resource_post_request.go @@ -0,0 +1,37 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevstateResourcePostRequest struct { + + // Name of the resource + Name string `json:"name,omitempty"` + + Inlined string `json:"inlined,omitempty"` + + Uri string `json:"uri,omitempty"` +} + +// AssertDevstateResourcePostRequestRequired checks if the required fields are not zero-ed +func AssertDevstateResourcePostRequestRequired(obj DevstateResourcePostRequest) error { + return nil +} + +// AssertRecurseDevstateResourcePostRequestRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevstateResourcePostRequest (e.g. [][]DevstateResourcePostRequest), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevstateResourcePostRequestRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevstateResourcePostRequest, ok := obj.(DevstateResourcePostRequest) + if !ok { + return ErrTypeAssertionError + } + return AssertDevstateResourcePostRequestRequired(aDevstateResourcePostRequest) + }) +} diff --git a/pkg/apiserver-gen/go/model_apply_command.go b/pkg/apiserver-gen/go/model_apply_command.go new file mode 100644 index 00000000000..b61f312f8da --- /dev/null +++ b/pkg/apiserver-gen/go/model_apply_command.go @@ -0,0 +1,40 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type ApplyCommand struct { + Component string `json:"component"` +} + +// AssertApplyCommandRequired checks if the required fields are not zero-ed +func AssertApplyCommandRequired(obj ApplyCommand) error { + elements := map[string]interface{}{ + "component": obj.Component, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseApplyCommandRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of ApplyCommand (e.g. [][]ApplyCommand), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseApplyCommandRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aApplyCommand, ok := obj.(ApplyCommand) + if !ok { + return ErrTypeAssertionError + } + return AssertApplyCommandRequired(aApplyCommand) + }) +} diff --git a/pkg/apiserver-gen/go/model_command.go b/pkg/apiserver-gen/go/model_command.go new file mode 100644 index 00000000000..6a724c320a9 --- /dev/null +++ b/pkg/apiserver-gen/go/model_command.go @@ -0,0 +1,68 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Command struct { + Name string `json:"name"` + + Group string `json:"group"` + + Default bool `json:"default,omitempty"` + + Type string `json:"type"` + + Exec ExecCommand `json:"exec,omitempty"` + + Apply ApplyCommand `json:"apply,omitempty"` + + Image ImageCommand `json:"image,omitempty"` + + Composite CompositeCommand `json:"composite,omitempty"` +} + +// AssertCommandRequired checks if the required fields are not zero-ed +func AssertCommandRequired(obj Command) error { + elements := map[string]interface{}{ + "name": obj.Name, + "group": obj.Group, + "type": obj.Type, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + if err := AssertExecCommandRequired(obj.Exec); err != nil { + return err + } + if err := AssertApplyCommandRequired(obj.Apply); err != nil { + return err + } + if err := AssertImageCommandRequired(obj.Image); err != nil { + return err + } + if err := AssertCompositeCommandRequired(obj.Composite); err != nil { + return err + } + return nil +} + +// AssertRecurseCommandRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Command (e.g. [][]Command), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseCommandRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aCommand, ok := obj.(Command) + if !ok { + return ErrTypeAssertionError + } + return AssertCommandRequired(aCommand) + }) +} diff --git a/pkg/apiserver-gen/go/model_composite_command.go b/pkg/apiserver-gen/go/model_composite_command.go new file mode 100644 index 00000000000..f586c357acf --- /dev/null +++ b/pkg/apiserver-gen/go/model_composite_command.go @@ -0,0 +1,43 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type CompositeCommand struct { + Commands []string `json:"commands"` + + Parallel bool `json:"parallel"` +} + +// AssertCompositeCommandRequired checks if the required fields are not zero-ed +func AssertCompositeCommandRequired(obj CompositeCommand) error { + elements := map[string]interface{}{ + "commands": obj.Commands, + "parallel": obj.Parallel, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseCompositeCommandRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of CompositeCommand (e.g. [][]CompositeCommand), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseCompositeCommandRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aCompositeCommand, ok := obj.(CompositeCommand) + if !ok { + return ErrTypeAssertionError + } + return AssertCompositeCommandRequired(aCompositeCommand) + }) +} diff --git a/pkg/apiserver-gen/go/model_container.go b/pkg/apiserver-gen/go/model_container.go new file mode 100644 index 00000000000..cb725a23849 --- /dev/null +++ b/pkg/apiserver-gen/go/model_container.go @@ -0,0 +1,61 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Container struct { + Name string `json:"name"` + + Image string `json:"image"` + + Command []string `json:"command"` + + Args []string `json:"args"` + + MemoryRequest string `json:"memoryRequest"` + + MemoryLimit string `json:"memoryLimit"` + + CpuRequest string `json:"cpuRequest"` + + CpuLimit string `json:"cpuLimit"` +} + +// AssertContainerRequired checks if the required fields are not zero-ed +func AssertContainerRequired(obj Container) error { + elements := map[string]interface{}{ + "name": obj.Name, + "image": obj.Image, + "command": obj.Command, + "args": obj.Args, + "memoryRequest": obj.MemoryRequest, + "memoryLimit": obj.MemoryLimit, + "cpuRequest": obj.CpuRequest, + "cpuLimit": obj.CpuLimit, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseContainerRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Container (e.g. [][]Container), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseContainerRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aContainer, ok := obj.(Container) + if !ok { + return ErrTypeAssertionError + } + return AssertContainerRequired(aContainer) + }) +} diff --git a/pkg/apiserver-gen/go/model_devfile_content.go b/pkg/apiserver-gen/go/model_devfile_content.go new file mode 100644 index 00000000000..360468cd7d8 --- /dev/null +++ b/pkg/apiserver-gen/go/model_devfile_content.go @@ -0,0 +1,84 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type DevfileContent struct { + Content string `json:"content"` + + Commands []Command `json:"commands"` + + Containers []Container `json:"containers"` + + Images []Image `json:"images"` + + Resources []Resource `json:"resources"` + + Events Events `json:"events"` + + Metadata Metadata `json:"metadata"` +} + +// AssertDevfileContentRequired checks if the required fields are not zero-ed +func AssertDevfileContentRequired(obj DevfileContent) error { + elements := map[string]interface{}{ + "content": obj.Content, + "commands": obj.Commands, + "containers": obj.Containers, + "images": obj.Images, + "resources": obj.Resources, + "events": obj.Events, + "metadata": obj.Metadata, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + for _, el := range obj.Commands { + if err := AssertCommandRequired(el); err != nil { + return err + } + } + for _, el := range obj.Containers { + if err := AssertContainerRequired(el); err != nil { + return err + } + } + for _, el := range obj.Images { + if err := AssertImageRequired(el); err != nil { + return err + } + } + for _, el := range obj.Resources { + if err := AssertResourceRequired(el); err != nil { + return err + } + } + if err := AssertEventsRequired(obj.Events); err != nil { + return err + } + if err := AssertMetadataRequired(obj.Metadata); err != nil { + return err + } + return nil +} + +// AssertRecurseDevfileContentRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of DevfileContent (e.g. [][]DevfileContent), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseDevfileContentRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aDevfileContent, ok := obj.(DevfileContent) + if !ok { + return ErrTypeAssertionError + } + return AssertDevfileContentRequired(aDevfileContent) + }) +} diff --git a/pkg/apiserver-gen/go/model_events.go b/pkg/apiserver-gen/go/model_events.go new file mode 100644 index 00000000000..7a92a807f0d --- /dev/null +++ b/pkg/apiserver-gen/go/model_events.go @@ -0,0 +1,37 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Events struct { + PreStart []string `json:"preStart,omitempty"` + + PostStart []string `json:"postStart,omitempty"` + + PreStop []string `json:"preStop,omitempty"` + + PostStop []string `json:"postStop,omitempty"` +} + +// AssertEventsRequired checks if the required fields are not zero-ed +func AssertEventsRequired(obj Events) error { + return nil +} + +// AssertRecurseEventsRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Events (e.g. 
[][]Events), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseEventsRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aEvents, ok := obj.(Events) + if !ok { + return ErrTypeAssertionError + } + return AssertEventsRequired(aEvents) + }) +} diff --git a/pkg/apiserver-gen/go/model_exec_command.go b/pkg/apiserver-gen/go/model_exec_command.go new file mode 100644 index 00000000000..9385d2cb2e7 --- /dev/null +++ b/pkg/apiserver-gen/go/model_exec_command.go @@ -0,0 +1,49 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type ExecCommand struct { + Component string `json:"component"` + + CommandLine string `json:"commandLine"` + + WorkingDir string `json:"workingDir"` + + HotReloadCapable bool `json:"hotReloadCapable"` +} + +// AssertExecCommandRequired checks if the required fields are not zero-ed +func AssertExecCommandRequired(obj ExecCommand) error { + elements := map[string]interface{}{ + "component": obj.Component, + "commandLine": obj.CommandLine, + "workingDir": obj.WorkingDir, + "hotReloadCapable": obj.HotReloadCapable, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseExecCommandRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of ExecCommand (e.g. [][]ExecCommand), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseExecCommandRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aExecCommand, ok := obj.(ExecCommand) + if !ok { + return ErrTypeAssertionError + } + return AssertExecCommandRequired(aExecCommand) + }) +} diff --git a/pkg/apiserver-gen/go/model_image.go b/pkg/apiserver-gen/go/model_image.go new file mode 100644 index 00000000000..729e551167c --- /dev/null +++ b/pkg/apiserver-gen/go/model_image.go @@ -0,0 +1,55 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Image struct { + Name string `json:"name"` + + ImageName string `json:"imageName"` + + Args []string `json:"args"` + + BuildContext string `json:"buildContext"` + + RootRequired bool `json:"rootRequired"` + + Uri string `json:"uri"` +} + +// AssertImageRequired checks if the required fields are not zero-ed +func AssertImageRequired(obj Image) error { + elements := map[string]interface{}{ + "name": obj.Name, + "imageName": obj.ImageName, + "args": obj.Args, + "buildContext": obj.BuildContext, + "rootRequired": obj.RootRequired, + "uri": obj.Uri, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseImageRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Image (e.g. [][]Image), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseImageRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aImage, ok := obj.(Image) + if !ok { + return ErrTypeAssertionError + } + return AssertImageRequired(aImage) + }) +} diff --git a/pkg/apiserver-gen/go/model_image_command.go b/pkg/apiserver-gen/go/model_image_command.go new file mode 100644 index 00000000000..d48c25d62f8 --- /dev/null +++ b/pkg/apiserver-gen/go/model_image_command.go @@ -0,0 +1,40 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type ImageCommand struct { + Component string `json:"component"` +} + +// AssertImageCommandRequired checks if the required fields are not zero-ed +func AssertImageCommandRequired(obj ImageCommand) error { + elements := map[string]interface{}{ + "component": obj.Component, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseImageCommandRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of ImageCommand (e.g. [][]ImageCommand), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseImageCommandRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aImageCommand, ok := obj.(ImageCommand) + if !ok { + return ErrTypeAssertionError + } + return AssertImageCommandRequired(aImageCommand) + }) +} diff --git a/pkg/apiserver-gen/go/model_metadata.go b/pkg/apiserver-gen/go/model_metadata.go new file mode 100644 index 00000000000..6ff59c02312 --- /dev/null +++ b/pkg/apiserver-gen/go/model_metadata.go @@ -0,0 +1,55 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Metadata struct { + Name string `json:"name,omitempty"` + + Version string `json:"version,omitempty"` + + DisplayName string `json:"displayName,omitempty"` + + Description string `json:"description,omitempty"` + + Tags string `json:"tags,omitempty"` + + Architectures string `json:"architectures,omitempty"` + + Icon string `json:"icon,omitempty"` + + GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"` + + ProjectType string `json:"projectType,omitempty"` + + Language string `json:"language,omitempty"` + + Website string `json:"website,omitempty"` + + Provider string `json:"provider,omitempty"` + + SupportUrl string `json:"supportUrl,omitempty"` +} + +// AssertMetadataRequired checks if the required fields are not zero-ed +func AssertMetadataRequired(obj Metadata) error { + return nil +} + +// AssertRecurseMetadataRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Metadata (e.g. [][]Metadata), otherwise ErrTypeAssertionError is thrown. 
+func AssertRecurseMetadataRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aMetadata, ok := obj.(Metadata) + if !ok { + return ErrTypeAssertionError + } + return AssertMetadataRequired(aMetadata) + }) +} diff --git a/pkg/apiserver-gen/go/model_resource.go b/pkg/apiserver-gen/go/model_resource.go new file mode 100644 index 00000000000..4e4e4204c3b --- /dev/null +++ b/pkg/apiserver-gen/go/model_resource.go @@ -0,0 +1,44 @@ +/* + * odo dev + * + * API interface for 'odo dev' + * + * API version: 0.1 + * Generated by: OpenAPI Generator (https://openapi-generator.tech) + */ + +package openapi + +type Resource struct { + Name string `json:"name"` + + Inlined string `json:"inlined,omitempty"` + + Uri string `json:"uri,omitempty"` +} + +// AssertResourceRequired checks if the required fields are not zero-ed +func AssertResourceRequired(obj Resource) error { + elements := map[string]interface{}{ + "name": obj.Name, + } + for name, el := range elements { + if isZero := IsZeroValue(el); isZero { + return &RequiredError{Field: name} + } + } + + return nil +} + +// AssertRecurseResourceRequired recursively checks if required fields are not zero-ed in a nested slice. +// Accepts only nested slice of Resource (e.g. [][]Resource), otherwise ErrTypeAssertionError is thrown. +func AssertRecurseResourceRequired(objSlice interface{}) error { + return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { + aResource, ok := obj.(Resource) + if !ok { + return ErrTypeAssertionError + } + return AssertResourceRequired(aResource) + }) +} diff --git a/pkg/apiserver-gen/go/routers.go b/pkg/apiserver-gen/go/routers.go index e698f8ce1e9..7c84c27ef9f 100644 --- a/pkg/apiserver-gen/go/routers.go +++ b/pkg/apiserver-gen/go/routers.go @@ -12,13 +12,14 @@ package openapi import ( "encoding/json" "errors" - "github.com/gorilla/mux" "io/ioutil" "mime/multipart" "net/http" "os" "strconv" "strings" + + "github.com/gorilla/mux" ) // A Route defines the parameters for an api endpoint diff --git a/pkg/apiserver-impl/api_default_service.go b/pkg/apiserver-impl/api_default_service.go index 9986c14bc05..ac60935de39 100644 --- a/pkg/apiserver-impl/api_default_service.go +++ b/pkg/apiserver-impl/api_default_service.go @@ -6,6 +6,7 @@ import ( "net/http" openapi "github.com/redhat-developer/odo/pkg/apiserver-gen/go" + "github.com/redhat-developer/odo/pkg/apiserver-impl/devstate" "github.com/redhat-developer/odo/pkg/component/describe" "github.com/redhat-developer/odo/pkg/kclient" odocontext "github.com/redhat-developer/odo/pkg/odo/context" @@ -22,6 +23,8 @@ type DefaultApiService struct { kubeClient kclient.ClientInterface podmanClient podman.Client stateClient state.Client + + devfileState devstate.DevfileState } // NewDefaultApiService creates a default api service @@ -38,6 +41,8 @@ func NewDefaultApiService( kubeClient: kubeClient, podmanClient: podmanClient, stateClient: stateClient, + + devfileState: devstate.NewDevfileState(), } } diff --git a/pkg/apiserver-impl/devstate.go b/pkg/apiserver-impl/devstate.go new file mode 100644 index 00000000000..f437f954225 --- /dev/null +++ b/pkg/apiserver-impl/devstate.go @@ -0,0 +1,268 @@ +package apiserver_impl + +import ( + "context" + "fmt" + "net/http" + + openapi "github.com/redhat-developer/odo/pkg/apiserver-gen/go" + "github.com/redhat-developer/odo/pkg/apiserver-impl/devstate" +) + +func (s *DefaultApiService) DevstateContainerPost(ctx context.Context, container 
openapi.DevstateContainerPostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddContainer( + container.Name, + container.Image, + container.Command, + container.Args, + container.MemReq, + container.MemLimit, + container.CpuReq, + container.CpuLimit, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the container: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateContainerContainerNameDelete(ctx context.Context, containerName string) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.DeleteContainer(containerName) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error deleting the container: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateImagePost(ctx context.Context, image openapi.DevstateImagePostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddImage( + image.Name, + image.ImageName, + image.Args, + image.BuildContext, + image.RootRequired, + image.Uri, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the image: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateImageImageNameDelete(ctx context.Context, imageName string) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.DeleteImage(imageName) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error deleting the image: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateResourcePost(ctx context.Context, resource openapi.DevstateResourcePostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddResource( + resource.Name, + resource.Inlined, + resource.Uri, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the resource: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil + +} + +func (s *DefaultApiService) DevstateResourceResourceNameDelete(ctx context.Context, resourceName string) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.DeleteResource(resourceName) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error deleting the resource: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateApplyCommandPost(ctx context.Context, command openapi.DevstateApplyCommandPostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddApplyCommand( + command.Name, + command.Component, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the Apply command: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateCommandCommandNameDelete(ctx context.Context, commandName string) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.DeleteCommand(commandName) + if 
err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error deleting the command: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateCompositeCommandPost(ctx context.Context, command openapi.DevstateCompositeCommandPostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddCompositeCommand( + command.Name, + command.Parallel, + command.Commands, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the Composite command: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil + +} + +func (s *DefaultApiService) DevstateExecCommandPost(ctx context.Context, command openapi.DevstateExecCommandPostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.AddExecCommand( + command.Name, + command.Component, + command.CommandLine, + command.WorkingDir, + command.HotReloadCapable, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error adding the Exec command: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateMetadataPut(ctx context.Context, metadata openapi.Metadata) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.SetMetadata( + metadata.Name, + metadata.Version, + metadata.DisplayName, + metadata.Description, + metadata.Tags, + metadata.Architectures, + metadata.Icon, + metadata.GlobalMemoryLimit, + metadata.ProjectType, + metadata.Language, + metadata.Website, + metadata.Provider, + metadata.SupportUrl, + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error updating the metadata: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateChartGet(context.Context) (openapi.ImplResponse, error) { + chart, err := s.devfileState.GetFlowChart() + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error building the Devfile cycle chart: %s", err), + }), nil + } + return openapi.Response(http.StatusOK, openapi.DevstateChartGet200Response{ + Chart: chart, + }), nil +} + +func (s *DefaultApiService) DevstateCommandCommandNameMovePost(ctx context.Context, commandName string, params openapi.DevstateCommandCommandNameMovePostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.MoveCommand( + params.FromGroup, + params.ToGroup, + int(params.FromIndex), + int(params.ToIndex), + ) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error moving command to group %q index %d: %s", params.ToGroup, params.ToIndex, err), + }), nil + } + return openapi.Response(http.StatusOK, newContent), nil +} + +func (s *DefaultApiService) DevstateCommandCommandNameSetDefaultPost(ctx context.Context, commandName string, params openapi.DevstateCommandCommandNameSetDefaultPostRequest) (openapi.ImplResponse, error) { + newContent, err := s.devfileState.SetDefaultCommand(commandName, params.Group) + if err != nil { + return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{ + Message: fmt.Sprintf("Error setting command %q as default for group %q: %s", 
commandName, params.Group, err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
+
+func (s *DefaultApiService) DevstateCommandCommandNameUnsetDefaultPost(ctx context.Context, commandName string) (openapi.ImplResponse, error) {
+	newContent, err := s.devfileState.UnsetDefaultCommand(commandName)
+	if err != nil {
+		return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{
+			Message: fmt.Sprintf("Error unsetting command %q as default: %s", commandName, err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
+
+func (s *DefaultApiService) DevstateEventsPut(ctx context.Context, params openapi.DevstateEventsPutRequest) (openapi.ImplResponse, error) {
+	newContent, err := s.devfileState.UpdateEvents(params.EventName, params.Commands)
+	if err != nil {
+		return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{
+			Message: fmt.Sprintf("Error updating commands for event %q: %s", params.EventName, err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
+
+func (s *DefaultApiService) DevstateQuantityValidPost(ctx context.Context, params openapi.DevstateQuantityValidPostRequest) (openapi.ImplResponse, error) {
+	result := devstate.IsQuantityValid(params.Quantity)
+	if !result {
+		return openapi.Response(http.StatusBadRequest, openapi.GeneralError{
+			Message: fmt.Sprintf("Quantity %q is not valid", params.Quantity),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, openapi.GeneralSuccess{
+		Message: fmt.Sprintf("Quantity %q is valid", params.Quantity),
+	}), nil
+}
+
+func (s *DefaultApiService) DevstateDevfilePut(ctx context.Context, params openapi.DevstateDevfilePutRequest) (openapi.ImplResponse, error) {
+	newContent, err := s.devfileState.SetDevfileContent(params.Content)
+	if err != nil {
+		return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{
+			Message: fmt.Sprintf("Error setting new Devfile content: %s", err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
+
+func (s *DefaultApiService) DevstateDevfileGet(context.Context) (openapi.ImplResponse, error) {
+	newContent, err := s.devfileState.GetContent()
+	if err != nil {
+		return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{
+			Message: fmt.Sprintf("Error getting the Devfile content: %s", err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
+
+func (s *DefaultApiService) DevstateDevfileDelete(context.Context) (openapi.ImplResponse, error) {
+	newContent, err := s.devfileState.SetDevfileContent(`schemaVersion: 2.2.0`)
+	if err != nil {
+		return openapi.Response(http.StatusInternalServerError, openapi.GeneralError{
+			Message: fmt.Sprintf("Error clearing Devfile content: %s", err),
+		}), nil
+	}
+	return openapi.Response(http.StatusOK, newContent), nil
+}
diff --git a/pkg/apiserver-impl/devstate/chart.go b/pkg/apiserver-impl/devstate/chart.go
new file mode 100644
index 00000000000..b1eba13e3d3
--- /dev/null
+++ b/pkg/apiserver-impl/devstate/chart.go
@@ -0,0 +1,15 @@
+package devstate
+
+import (
+	"errors"
+
+	"github.com/feloy/devfile-lifecycle/pkg/graph"
+)
+
+func (o *DevfileState) GetFlowChart() (string, error) {
+	g, err := graph.Build(o.Devfile.Data)
+	if err != nil {
+		return "", errors.New("error building graph")
+	}
+	return g.ToFlowchart(), nil
+}
diff --git a/pkg/apiserver-impl/devstate/chart_test.go b/pkg/apiserver-impl/devstate/chart_test.go
new file mode 100644
index 00000000000..ce100a46fc2
--- /dev/null
+++ 
b/pkg/apiserver-impl/devstate/chart_test.go @@ -0,0 +1,42 @@ +package devstate + +import ( + "testing" +) + +func TestDevfileState_GetFlowChart(t *testing.T) { + tests := []struct { + name string + state func() DevfileState + want string + wantErr bool + }{ + { + name: "with initial devfile", + state: func() DevfileState { + return NewDevfileState() + }, + want: `graph TB +containers["containers"] +start["start"] +sync-all-containers["Sync All Sources"] +start -->|"dev"| containers +containers -->|"container running"| sync-all-containers +`, + }, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.GetFlowChart() + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.GetFlowChart() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("DevfileState.GetFlowChart() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/apiserver-impl/devstate/commands.go b/pkg/apiserver-impl/devstate/commands.go new file mode 100644 index 00000000000..55641aca87a --- /dev/null +++ b/pkg/apiserver-impl/devstate/commands.go @@ -0,0 +1,185 @@ +package devstate + +import ( + "fmt" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" + . "github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func (o *DevfileState) AddExecCommand(name string, component string, commandLine string, workingDir string, hotReloadCapable bool) (DevfileContent, error) { + command := v1alpha2.Command{ + Id: name, + CommandUnion: v1alpha2.CommandUnion{ + Exec: &v1alpha2.ExecCommand{ + Component: component, + CommandLine: commandLine, + WorkingDir: workingDir, + HotReloadCapable: &hotReloadCapable, + }, + }, + } + err := o.Devfile.Data.AddCommands([]v1alpha2.Command{command}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) AddApplyCommand(name string, component string) (DevfileContent, error) { + command := v1alpha2.Command{ + Id: name, + CommandUnion: v1alpha2.CommandUnion{ + Apply: &v1alpha2.ApplyCommand{ + Component: component, + }, + }, + } + err := o.Devfile.Data.AddCommands([]v1alpha2.Command{command}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) AddCompositeCommand(name string, parallel bool, commands []string) (DevfileContent, error) { + command := v1alpha2.Command{ + Id: name, + CommandUnion: v1alpha2.CommandUnion{ + Composite: &v1alpha2.CompositeCommand{ + Parallel: ¶llel, + Commands: commands, + }, + }, + } + err := o.Devfile.Data.AddCommands([]v1alpha2.Command{command}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) DeleteCommand(name string) (DevfileContent, error) { + err := o.checkCommandUsed(name) + if err != nil { + return DevfileContent{}, fmt.Errorf("error deleting command %q: %w", name, err) + } + err = o.Devfile.Data.DeleteCommand(name) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) checkCommandUsed(name string) error { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{ + CommandOptions: common.CommandOptions{ + CommandType: v1alpha2.CompositeCommandType, + }, + }) + if err != nil { + return err + } + for _, command := range commands { + for _, subcommand := range command.Composite.Commands { + if subcommand == name { + return fmt.Errorf("command %q is used by 
composite command %q", name, command.Id) + } + } + } + return nil +} + +func (o *DevfileState) MoveCommand(previousGroup, newGroup string, previousIndex, newIndex int) (DevfileContent, error) { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return DevfileContent{}, err + } + + commandsByGroup, err := subMoveCommand(commands, previousGroup, newGroup, previousIndex, newIndex) + if err != nil { + return DevfileContent{}, err + } + + // Deleting from the end as deleting from the beginning seems buggy + for i := len(commands) - 1; i >= 0; i-- { + err = o.Devfile.Data.DeleteCommand(commands[i].Id) + if err != nil { + return DevfileContent{}, err + } + } + + for _, group := range []string{"build", "run", "test", "debug", "deploy", ""} { + err := o.Devfile.Data.AddCommands(commandsByGroup[group]) + if err != nil { + return DevfileContent{}, err + } + } + return o.GetContent() +} + +func subMoveCommand(commands []v1alpha2.Command, previousGroup, newGroup string, previousIndex, newIndex int) (map[string][]v1alpha2.Command, error) { + commandsByGroup := map[string][]v1alpha2.Command{} + + for _, command := range commands { + group := GetGroup(command) + commandsByGroup[group] = append(commandsByGroup[group], command) + } + + if len(commandsByGroup[previousGroup]) <= previousIndex { + return nil, fmt.Errorf("unable to find command at index #%d in group %q", previousIndex, previousGroup) + } + + commandToMove := commandsByGroup[previousGroup][previousIndex] + SetGroup(&commandToMove, newGroup) + + commandsByGroup[previousGroup] = append( + commandsByGroup[previousGroup][:previousIndex], + commandsByGroup[previousGroup][previousIndex+1:]..., + ) + + end := append([]v1alpha2.Command{}, commandsByGroup[newGroup][newIndex:]...) + commandsByGroup[newGroup] = append(commandsByGroup[newGroup][:newIndex], commandToMove) + commandsByGroup[newGroup] = append(commandsByGroup[newGroup], end...) + + return commandsByGroup, nil +} + +func (o *DevfileState) SetDefaultCommand(commandName string, group string) (DevfileContent, error) { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return DevfileContent{}, err + } + + for i, command := range commands { + if GetGroup(command) == group { + isDefault := command.Id == commandName + SetDefault(&commands[i], isDefault) + err = o.Devfile.Data.UpdateCommand(command) + if err != nil { + return DevfileContent{}, err + } + } + } + return o.GetContent() +} + +func (o *DevfileState) UnsetDefaultCommand(commandName string) (DevfileContent, error) { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return DevfileContent{}, err + } + + for i, command := range commands { + if command.Id == commandName { + SetDefault(&commands[i], false) + err = o.Devfile.Data.UpdateCommand(command) + if err != nil { + return DevfileContent{}, err + } + break + } + } + return o.GetContent() +} diff --git a/pkg/apiserver-impl/devstate/commands_test.go b/pkg/apiserver-impl/devstate/commands_test.go new file mode 100644 index 00000000000..66db027e248 --- /dev/null +++ b/pkg/apiserver-impl/devstate/commands_test.go @@ -0,0 +1,821 @@ +package devstate + +import ( + "testing" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/google/go-cmp/cmp" + . 
"github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func TestDevfileState_AddExecCommand(t *testing.T) { + type args struct { + name string + component string + commandLine string + workingDir string + hotReloadCapable bool + } + tests := []struct { + name string + state func() DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Add an exec command", + state: func() DevfileState { + state := NewDevfileState() + _, err := state.AddContainer( + "a-container", + "an-image", + []string{"run", "command"}, + []string{"arg1", "arg2"}, + "1Gi", + "2Gi", + "100m", + "200m", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "an-exec-command", + component: "a-container", + commandLine: "run command", + workingDir: "/path/to/work", + hotReloadCapable: true, + }, + want: DevfileContent{ + Content: `commands: +- exec: + commandLine: run command + component: a-container + hotReloadCapable: true + workingDir: /path/to/work + id: an-exec-command +components: +- container: + args: + - arg1 + - arg2 + command: + - run + - command + cpuLimit: 200m + cpuRequest: 100m + image: an-image + memoryLimit: 2Gi + memoryRequest: 1Gi + name: a-container +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{ + { + Name: "an-exec-command", + Type: "exec", + Exec: ExecCommand{ + Component: "a-container", + CommandLine: "run command", + WorkingDir: "/path/to/work", + HotReloadCapable: true, + }, + }, + }, + Containers: []Container{ + { + Name: "a-container", + Image: "an-image", + Command: []string{"run", "command"}, + Args: []string{"arg1", "arg2"}, + MemoryRequest: "1Gi", + MemoryLimit: "2Gi", + CpuRequest: "100m", + CpuLimit: "200m", + }, + }, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{}, + }, + }, + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.AddExecCommand(tt.args.name, tt.args.component, tt.args.commandLine, tt.args.workingDir, tt.args.hotReloadCapable) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.AddExecCommand() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.AddExecCommand() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.AddExecCommand() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_AddApplyCommand(t *testing.T) { + type args struct { + name string + component string + } + tests := []struct { + name string + state func() DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Add an Apply command", + state: func() DevfileState { + state := NewDevfileState() + _, err := state.AddImage( + "an-image", + "an-image-name", + nil, "/context", false, "", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "an-apply-command", + component: "an-image", + }, + want: DevfileContent{ + Content: `commands: +- apply: + component: an-image + id: an-apply-command +components: +- image: + dockerfile: + buildContext: /context + rootRequired: false + imageName: an-image-name + name: an-image +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{ + { + Name: "an-apply-command", + Type: "image", + Image: ImageCommand{ + Component: "an-image", + }, + }, + }, + Containers: []Container{}, + Images: []Image{ + { + Name: "an-image", + ImageName: "an-image-name", + BuildContext: "/context", + }, + }, + Resources: []Resource{}, + Events: Events{}, + }, + }, + // TODO: Add test cases. 
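+		// A hypothetical extra case (editorial sketch, assuming duplicate
+		// command IDs are rejected by the devfile library): re-using the name
+		// of an existing apply command should fail.
+		{
+			name: "Add an apply command with an already used name",
+			state: func() DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddImage(
+					"an-image",
+					"an-image-name",
+					nil, "/context", false, "",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddApplyCommand("an-apply-command", "an-image")
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name:      "an-apply-command",
+				component: "an-image",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},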
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			o := tt.state()
+			got, err := o.AddApplyCommand(tt.args.name, tt.args.component)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DevfileState.AddApplyCommand() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" {
+				t.Errorf("DevfileState.AddApplyCommand() mismatch (-want +got):\n%s", diff)
+			}
+			if diff := cmp.Diff(tt.want, got); diff != "" {
+				t.Errorf("DevfileState.AddApplyCommand() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+func TestDevfileState_AddCompositeCommand(t *testing.T) {
+	type args struct {
+		name     string
+		parallel bool
+		commands []string
+	}
+	tests := []struct {
+		name    string
+		state   func() DevfileState
+		args    args
+		want    DevfileContent
+		wantErr bool
+	}{
+		{
+			name: "Add a Composite command",
+			state: func() DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddContainer(
+					"a-container",
+					"an-image",
+					[]string{"run", "command"},
+					[]string{"arg1", "arg2"},
+					"1Gi",
+					"2Gi",
+					"100m",
+					"200m",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddExecCommand(
+					"an-exec-command",
+					"a-container",
+					"run command",
+					"/path/to/work",
+					true,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name:     "a-composite-command",
+				parallel: true,
+				commands: []string{"an-exec-command"},
+			},
+			want: DevfileContent{
+				Content: `commands:
+- exec:
+    commandLine: run command
+    component: a-container
+    hotReloadCapable: true
+    workingDir: /path/to/work
+  id: an-exec-command
+- composite:
+    commands:
+    - an-exec-command
+    parallel: true
+  id: a-composite-command
+components:
+- container:
+    args:
+    - arg1
+    - arg2
+    command:
+    - run
+    - command
+    cpuLimit: 200m
+    cpuRequest: 100m
+    image: an-image
+    memoryLimit: 2Gi
+    memoryRequest: 1Gi
+    name: a-container
+metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands: []Command{
+					{
+						Name: "an-exec-command",
+						Type: "exec",
+						Exec: ExecCommand{
+							Component:        "a-container",
+							CommandLine:      "run command",
+							WorkingDir:       "/path/to/work",
+							HotReloadCapable: true,
+						},
+					},
+					{
+						Name:      "a-composite-command",
+						Type:      "composite",
+						Composite: CompositeCommand{Commands: []string{"an-exec-command"}, Parallel: true},
+					},
+				},
+				Containers: []Container{
+					{
+						Name:          "a-container",
+						Image:         "an-image",
+						Command:       []string{"run", "command"},
+						Args:          []string{"arg1", "arg2"},
+						MemoryRequest: "1Gi",
+						MemoryLimit:   "2Gi",
+						CpuRequest:    "100m",
+						CpuLimit:      "200m",
+					},
+				},
+				Images:    []Image{},
+				Resources: []Resource{},
+				Events:    Events{},
+			},
+		},
+		// TODO: Add test cases.
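+		// A hypothetical extra case (editorial sketch, assuming command IDs
+		// are shared across command types): a composite command may not reuse
+		// the ID of an existing exec command.
+		{
+			name: "Add a composite command reusing an existing command name",
+			state: func() DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddContainer(
+					"a-container",
+					"an-image",
+					[]string{"run", "command"},
+					[]string{"arg1", "arg2"},
+					"1Gi",
+					"2Gi",
+					"100m",
+					"200m",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddExecCommand(
+					"an-exec-command",
+					"a-container",
+					"run command",
+					"/path/to/work",
+					true,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name:     "an-exec-command",
+				parallel: false,
+				commands: []string{"an-exec-command"},
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},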
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			o := tt.state()
+			got, err := o.AddCompositeCommand(tt.args.name, tt.args.parallel, tt.args.commands)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DevfileState.AddCompositeCommand() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" {
+				t.Errorf("DevfileState.AddCompositeCommand() mismatch (-want +got):\n%s", diff)
+			}
+			if diff := cmp.Diff(tt.want, got); diff != "" {
+				t.Errorf("DevfileState.AddCompositeCommand() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+func TestDevfileState_DeleteCommand(t *testing.T) {
+	type args struct {
+		name string
+	}
+	tests := []struct {
+		name    string
+		state   func(t *testing.T) DevfileState
+		args    args
+		want    DevfileContent
+		wantErr bool
+	}{
+		{
+			name: "Delete an existing command",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddContainer(
+					"a-container",
+					"an-image",
+					[]string{"run", "command"},
+					[]string{"arg1", "arg2"},
+					"1Gi",
+					"2Gi",
+					"100m",
+					"200m",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddExecCommand(
+					"an-exec-command",
+					"a-container",
+					"run command",
+					"/path/to/work",
+					true,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "an-exec-command",
+			},
+			want: DevfileContent{
+				Content: `components:
+- container:
+    args:
+    - arg1
+    - arg2
+    command:
+    - run
+    - command
+    cpuLimit: 200m
+    cpuRequest: 100m
+    image: an-image
+    memoryLimit: 2Gi
+    memoryRequest: 1Gi
+  name: a-container
+metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands: []Command{},
+				Containers: []Container{
+					{
+						Name:          "a-container",
+						Image:         "an-image",
+						Command:       []string{"run", "command"},
+						Args:          []string{"arg1", "arg2"},
+						MemoryRequest: "1Gi",
+						MemoryLimit:   "2Gi",
+						CpuRequest:    "100m",
+						CpuLimit:      "200m",
+					},
+				},
+				Images:    []Image{},
+				Resources: []Resource{},
+				Events:    Events{},
+			},
+		},
+		{
+			name: "Delete a non existing command",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				return state
+			},
+			args: args{
+				name: "another-name",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},
+		// TODO: Add test cases.
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.DeleteCommand(tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.DeleteCommand() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.DeleteCommand() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.DeleteCommand() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func newCommand(group string, id string) v1alpha2.Command { + return v1alpha2.Command{ + Id: id, + CommandUnion: v1alpha2.CommandUnion{ + Exec: &v1alpha2.ExecCommand{ + LabeledCommand: v1alpha2.LabeledCommand{ + BaseCommand: v1alpha2.BaseCommand{ + Group: &v1alpha2.CommandGroup{ + Kind: v1alpha2.CommandGroupKind(group), + }, + }, + }, + }, + }, + } +} + +func Test_subMoveCommand(t *testing.T) { + type args struct { + commands []v1alpha2.Command + previousGroup string + newGroup string + previousIndex int + newIndex int + } + tests := []struct { + name string + args args + want map[string][]v1alpha2.Command + wantErr bool + }{ + { + name: "Move from run to test", + args: args{ + commands: []v1alpha2.Command{ + newCommand("build", "build1"), + newCommand("run", "runToTest"), + newCommand("", "other1"), + }, + previousGroup: "run", + previousIndex: 0, + newGroup: "test", + newIndex: 0, + }, + want: map[string][]v1alpha2.Command{ + "build": { + newCommand("build", "build1"), + }, + "run": {}, + "test": { + newCommand("test", "runToTest"), + }, + "": { + newCommand("", "other1"), + }, + }, + }, + { + name: "Move from other to build", + args: args{ + commands: []v1alpha2.Command{ + newCommand("build", "build1"), + newCommand("run", "run"), + newCommand("other", "otherToBuild"), + }, + previousGroup: "other", + previousIndex: 0, + newGroup: "build", + newIndex: 1, + }, + want: map[string][]v1alpha2.Command{ + "build": { + newCommand("build", "build1"), + newCommand("build", "otherToBuild"), + }, + "run": { + newCommand("run", "run"), + }, + "other": {}, + }, + }, + // TODO: Add test cases. 
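+		{
+			// Suggested additional case, mirroring the two cases above (an
+			// extrapolation, not part of the original tests): the resulting map is
+			// assumed to contain one entry per input group plus the target group.
+			name: "Move from build to deploy",
+			args: args{
+				commands: []v1alpha2.Command{
+					newCommand("build", "buildToDeploy"),
+				},
+				previousGroup: "build",
+				previousIndex: 0,
+				newGroup:      "deploy",
+				newIndex:      0,
+			},
+			want: map[string][]v1alpha2.Command{
+				"build": {},
+				"deploy": {
+					newCommand("deploy", "buildToDeploy"),
+				},
+			},
+		},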
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := subMoveCommand(tt.args.commands, tt.args.previousGroup, tt.args.newGroup, tt.args.previousIndex, tt.args.newIndex)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("subMoveCommand() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if diff := cmp.Diff(tt.want, got); diff != "" {
+				t.Errorf("subMoveCommand() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+func TestDevfileState_MoveCommand(t *testing.T) {
+	type args struct {
+		previousGroup string
+		newGroup      string
+		previousIndex int
+		newIndex      int
+	}
+	tests := []struct {
+		name    string
+		state   func(t *testing.T) DevfileState
+		args    args
+		want    DevfileContent
+		wantErr bool
+	}{
+		{
+			name: "not found command",
+			state: func(t *testing.T) DevfileState {
+				return NewDevfileState()
+			},
+			args: args{
+				previousGroup: "build",
+				previousIndex: 0,
+				newGroup:      "run",
+				newIndex:      0,
+			},
+			wantErr: true,
+		},
+		{
+			name: "command moved from no group to run group",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddExecCommand(
+					"an-exec-command",
+					"a-container",
+					"run command",
+					"/path/to/work",
+					true,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				previousGroup: "",
+				previousIndex: 0,
+				newGroup:      "run",
+				newIndex:      0,
+			},
+			want: DevfileContent{
+				Content: `commands:
+- exec:
+    commandLine: run command
+    component: a-container
+    group:
+      kind: run
+    hotReloadCapable: true
+    workingDir: /path/to/work
+  id: an-exec-command
+metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands: []Command{
+					{
+						Name:    "an-exec-command",
+						Group:   "run",
+						Default: false,
+						Type:    "exec",
+						Exec: ExecCommand{
+							Component:        "a-container",
+							CommandLine:      "run command",
+							WorkingDir:       "/path/to/work",
+							HotReloadCapable: true,
+						},
+					},
+				},
+				Containers: []Container{},
+				Images:     []Image{},
+				Resources:  []Resource{},
+			},
+		},
+		// TODO: Add test cases.
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.MoveCommand(tt.args.previousGroup, tt.args.newGroup, tt.args.previousIndex, tt.args.newIndex) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.MoveCommand() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.MoveCommand() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_SetDefaultCommand(t *testing.T) { + type args struct { + commandName string + group string + } + tests := []struct { + name string + state func(t *testing.T) DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "command set to default in run group", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddExecCommand( + "an-exec-command", + "a-container", + "run command", + "/path/to/work", + true, + ) + if err != nil { + t.Fatal(err) + } + _, err = state.MoveCommand("", "run", 0, 0) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + commandName: "an-exec-command", + group: "run", + }, + want: DevfileContent{ + Content: `commands: +- exec: + commandLine: run command + component: a-container + group: + isDefault: true + kind: run + hotReloadCapable: true + workingDir: /path/to/work + id: an-exec-command +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{ + { + Name: "an-exec-command", + Group: "run", + Default: true, + Type: "exec", + Exec: ExecCommand{ + Component: "a-container", + CommandLine: "run command", + WorkingDir: "/path/to/work", + HotReloadCapable: true, + }, + }, + }, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + }, + }, + // TODO: Add test cases. 
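+		{
+			// Suggested additional case, not part of the original tests: it assumes
+			// SetDefaultCommand returns an error when the command is not found.
+			name: "set default for a non existing command",
+			state: func(t *testing.T) DevfileState {
+				return NewDevfileState()
+			},
+			args: args{
+				commandName: "a-non-existing-command",
+				group:       "run",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},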
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.SetDefaultCommand(tt.args.commandName, tt.args.group) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.SetDefaultCommand() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.SetDefaultCommand() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_UnsetDefaultCommand(t *testing.T) { + type args struct { + commandName string + } + tests := []struct { + name string + state func(t *testing.T) DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "command unset default", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddExecCommand( + "an-exec-command", + "a-container", + "run command", + "/path/to/work", + true, + ) + if err != nil { + t.Fatal(err) + } + _, err = state.MoveCommand("", "run", 0, 0) + if err != nil { + t.Fatal(err) + } + _, err = state.SetDefaultCommand("an-exec-command", "run") + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + commandName: "an-exec-command", + }, + want: DevfileContent{ + Content: `commands: +- exec: + commandLine: run command + component: a-container + group: + isDefault: false + kind: run + hotReloadCapable: true + workingDir: /path/to/work + id: an-exec-command +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{ + { + Name: "an-exec-command", + Group: "run", + Default: false, + Type: "exec", + Exec: ExecCommand{ + Component: "a-container", + CommandLine: "run command", + WorkingDir: "/path/to/work", + HotReloadCapable: true, + }, + }, + }, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + }, + }, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.UnsetDefaultCommand(tt.args.commandName) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.UnsetDefaultCommand() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.UnsetDefaultCommand() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/apiserver-impl/devstate/components.go b/pkg/apiserver-impl/devstate/components.go new file mode 100644 index 00000000000..3d1b4790adc --- /dev/null +++ b/pkg/apiserver-impl/devstate/components.go @@ -0,0 +1,186 @@ +package devstate + +import ( + "errors" + "fmt" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" + . 
"github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func (o *DevfileState) AddContainer(name string, image string, command []string, args []string, memRequest string, memLimit string, cpuRequest string, cpuLimit string) (DevfileContent, error) { + container := v1alpha2.Component{ + Name: name, + ComponentUnion: v1alpha2.ComponentUnion{ + Container: &v1alpha2.ContainerComponent{ + Container: v1alpha2.Container{ + Image: image, + Command: command, + Args: args, + MemoryRequest: memRequest, + MemoryLimit: memLimit, + CpuRequest: cpuRequest, + CpuLimit: cpuLimit, + }, + }, + }, + } + err := o.Devfile.Data.AddComponents([]v1alpha2.Component{container}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) DeleteContainer(name string) (DevfileContent, error) { + + err := o.checkContainerUsed(name) + if err != nil { + return DevfileContent{}, fmt.Errorf("error deleting container %q: %w", name, err) + } + + // TODO check if it is a Container, not another component + + err = o.Devfile.Data.DeleteComponent(name) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) checkContainerUsed(name string) error { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{ + CommandOptions: common.CommandOptions{ + CommandType: v1alpha2.ExecCommandType, + }, + }) + if err != nil { + return err + } + for _, command := range commands { + if command.Exec.Component == name { + return fmt.Errorf("container %q is used by exec command %q", name, command.Id) + } + } + return nil +} + +func (o *DevfileState) AddImage(name string, imageName string, args []string, buildContext string, rootRequired bool, uri string) (DevfileContent, error) { + container := v1alpha2.Component{ + Name: name, + ComponentUnion: v1alpha2.ComponentUnion{ + Image: &v1alpha2.ImageComponent{ + Image: v1alpha2.Image{ + ImageName: imageName, + ImageUnion: v1alpha2.ImageUnion{ + Dockerfile: &v1alpha2.DockerfileImage{ + Dockerfile: v1alpha2.Dockerfile{ + Args: args, + BuildContext: buildContext, + RootRequired: &rootRequired, + }, + DockerfileSrc: v1alpha2.DockerfileSrc{ + Uri: uri, + }, + }, + }, + }, + }, + }, + } + err := o.Devfile.Data.AddComponents([]v1alpha2.Component{container}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) DeleteImage(name string) (DevfileContent, error) { + + err := o.checkImageUsed(name) + if err != nil { + return DevfileContent{}, fmt.Errorf("error deleting image %q: %w", name, err) + } + + // TODO check if it is an Image, not another component + + err = o.Devfile.Data.DeleteComponent(name) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) checkImageUsed(name string) error { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{ + CommandOptions: common.CommandOptions{ + CommandType: v1alpha2.ApplyCommandType, + }, + }) + if err != nil { + return err + } + for _, command := range commands { + if command.Apply.Component == name { + return fmt.Errorf("image %q is used by Image Command %q", name, command.Id) + } + } + return nil +} + +func (o *DevfileState) AddResource(name string, inlined string, uri string) (DevfileContent, error) { + if inlined != "" && uri != "" { + return DevfileContent{}, errors.New("both inlined and uri cannot be set at the same time") + } + container := v1alpha2.Component{ + Name: name, + ComponentUnion: v1alpha2.ComponentUnion{ + Kubernetes: 
&v1alpha2.KubernetesComponent{ + K8sLikeComponent: v1alpha2.K8sLikeComponent{ + K8sLikeComponentLocation: v1alpha2.K8sLikeComponentLocation{ + Inlined: inlined, + Uri: uri, + }, + }, + }, + }, + } + err := o.Devfile.Data.AddComponents([]v1alpha2.Component{container}) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) DeleteResource(name string) (DevfileContent, error) { + + err := o.checkResourceUsed(name) + if err != nil { + return DevfileContent{}, fmt.Errorf("error deleting resource %q: %w", name, err) + } + // TODO check if it is a Resource, not another component + + err = o.Devfile.Data.DeleteComponent(name) + if err != nil { + return DevfileContent{}, err + } + return o.GetContent() +} + +func (o *DevfileState) checkResourceUsed(name string) error { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{ + CommandOptions: common.CommandOptions{ + CommandType: v1alpha2.ApplyCommandType, + }, + }) + if err != nil { + return err + } + for _, command := range commands { + if command.Apply.Component == name { + return fmt.Errorf("resource %q is used by Apply Command %q", name, command.Id) + } + } + return nil +} diff --git a/pkg/apiserver-impl/devstate/components_test.go b/pkg/apiserver-impl/devstate/components_test.go new file mode 100644 index 00000000000..9cf45c0a096 --- /dev/null +++ b/pkg/apiserver-impl/devstate/components_test.go @@ -0,0 +1,521 @@ +package devstate + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + . "github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func TestDevfileState_AddContainer(t *testing.T) { + type args struct { + name string + image string + command []string + args []string + memRequest string + memLimit string + cpuRequest string + cpuLimit string + } + tests := []struct { + name string + state func() DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Add a container", + state: func() DevfileState { + return NewDevfileState() + }, + args: args{ + name: "a-name", + image: "an-image", + command: []string{"run", "command"}, + args: []string{"arg1", "arg2"}, + memRequest: "1Gi", + memLimit: "2Gi", + cpuRequest: "100m", + cpuLimit: "200m", + }, + want: DevfileContent{ + Content: `components: +- container: + args: + - arg1 + - arg2 + command: + - run + - command + cpuLimit: 200m + cpuRequest: 100m + image: an-image + memoryLimit: 2Gi + memoryRequest: 1Gi + name: a-name +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{ + { + Name: "a-name", + Image: "an-image", + Command: []string{"run", "command"}, + Args: []string{"arg1", "arg2"}, + MemoryRequest: "1Gi", + MemoryLimit: "2Gi", + CpuRequest: "100m", + CpuLimit: "200m", + }, + }, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{}, + }, + }, + // TODO: Add test cases. 
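+		{
+			// Suggested additional case, not part of the original tests: it assumes
+			// the devfile library rejects two components declared with the same name.
+			name: "Add a container with an already used name",
+			state: func() DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddContainer(
+					"a-name",
+					"an-image",
+					[]string{"run", "command"},
+					[]string{"arg1", "arg2"},
+					"1Gi",
+					"2Gi",
+					"100m",
+					"200m",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name:  "a-name",
+				image: "another-image",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},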
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.AddContainer(tt.args.name, tt.args.image, tt.args.command, tt.args.args, tt.args.memRequest, tt.args.memLimit, tt.args.cpuRequest, tt.args.cpuLimit) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.AddContainer() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.AddContainer() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.AddContainer() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_DeleteContainer(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + state func(t *testing.T) DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Delete an existing container", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddContainer( + "a-name", + "an-image", + []string{"run", "command"}, + []string{"arg1", "arg2"}, + "1Gi", + "2Gi", + "100m", + "200m", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "a-name", + }, + want: DevfileContent{ + Content: `metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{}, + }, + }, + { + name: "Delete a non existing container", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddContainer( + "a-name", + "an-image", + []string{"run", "command"}, + []string{"arg1", "arg2"}, + "1Gi", + "2Gi", + "100m", + "200m", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "another-name", + }, + want: DevfileContent{}, + wantErr: true, + }, + // TODO: Add test cases. 
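+		{
+			// Suggested additional case, based on checkContainerUsed in
+			// components.go: deleting a container referenced by an exec command
+			// is expected to fail.
+			name: "Delete a container used by an exec command",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddContainer(
+					"a-name",
+					"an-image",
+					[]string{"run", "command"},
+					[]string{"arg1", "arg2"},
+					"1Gi",
+					"2Gi",
+					"100m",
+					"200m",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddExecCommand(
+					"an-exec-command",
+					"a-name",
+					"run command",
+					"/path/to/work",
+					true,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "a-name",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},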
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.DeleteContainer(tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.DeleteContainer() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.DeleteContainer() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.DeleteContainer() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_AddImage(t *testing.T) { + type args struct { + name string + imageName string + args []string + buildContext string + rootRequired bool + uri string + } + tests := []struct { + name string + state func() DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Add an image", + state: func() DevfileState { + return NewDevfileState() + }, + args: args{ + name: "a-name", + imageName: "an-image-name", + args: []string{"start", "command"}, + buildContext: "path/to/context", + rootRequired: true, + uri: "an-uri", + }, + want: DevfileContent{ + Content: `components: +- image: + dockerfile: + args: + - start + - command + buildContext: path/to/context + rootRequired: true + uri: an-uri + imageName: an-image-name + name: a-name +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{ + { + Name: "a-name", + ImageName: "an-image-name", + Args: []string{"start", "command"}, + BuildContext: "path/to/context", + RootRequired: true, + Uri: "an-uri", + }, + }, + Resources: []Resource{}, + Events: Events{}, + }, + }, + // TODO: Add test cases. + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.AddImage(tt.args.name, tt.args.imageName, tt.args.args, tt.args.buildContext, tt.args.rootRequired, tt.args.uri) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.AddImage() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.AddImage() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.AddImage() mismatch (-want +got):\n%s", diff) + } + }) + } +} + +func TestDevfileState_DeleteImage(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + state func(t *testing.T) DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Delete an existing image", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddImage( + "a-name", + "an-image-name", + []string{"start", "command"}, + "path/to/context", + true, + "an-uri", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "a-name", + }, + want: DevfileContent{ + Content: `metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{}, + }, + }, + { + name: "Delete a non existing image", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.AddImage( + "a-name", + "an-image-name", + []string{"start", "command"}, + "path/to/context", + true, + "an-uri", + ) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + name: "another-name", + }, + want: DevfileContent{}, + wantErr: true, + }, + // TODO: Add test cases. 
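+		{
+			// Suggested additional case, based on checkImageUsed in components.go:
+			// deleting an image referenced by an apply command is expected to fail.
+			name: "Delete an image used by an apply command",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddImage(
+					"a-name",
+					"an-image-name",
+					[]string{"start", "command"},
+					"path/to/context",
+					true,
+					"an-uri",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddApplyCommand("an-apply-command", "a-name")
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "a-name",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},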
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			o := tt.state(t)
+			got, err := o.DeleteImage(tt.args.name)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DevfileState.DeleteImage() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" {
+				t.Errorf("DevfileState.DeleteImage() mismatch (-want +got):\n%s", diff)
+			}
+			if diff := cmp.Diff(tt.want, got); diff != "" {
+				t.Errorf("DevfileState.DeleteImage() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+func TestDevfileState_AddResource(t *testing.T) {
+	type args struct {
+		name   string
+		inline string
+		uri    string
+	}
+	tests := []struct {
+		name    string
+		state   func() DevfileState
+		args    args
+		want    DevfileContent
+		wantErr bool
+	}{
+		{
+			name: "Add a resource with uri",
+			state: func() DevfileState {
+				return NewDevfileState()
+			},
+			args: args{
+				name: "a-name",
+				uri:  "an-uri",
+			},
+			want: DevfileContent{
+				Content: `components:
+- kubernetes:
+    uri: an-uri
+  name: a-name
+metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands:   []Command{},
+				Containers: []Container{},
+				Images:     []Image{},
+				Resources: []Resource{
+					{
+						Name: "a-name",
+						Uri:  "an-uri",
+					},
+				},
+				Events: Events{},
+			},
+		},
+		{
+			name: "Add an inline resource",
+			state: func() DevfileState {
+				return NewDevfileState()
+			},
+			args: args{
+				name:   "a-name",
+				inline: "inline resource...",
+			},
+			want: DevfileContent{
+				Content: `components:
+- kubernetes:
+    inlined: inline resource...
+  name: a-name
+metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands:   []Command{},
+				Containers: []Container{},
+				Images:     []Image{},
+				Resources: []Resource{
+					{
+						Name:    "a-name",
+						Inlined: "inline resource...",
+					},
+				},
+				Events: Events{},
+			},
+		},
+		// TODO: Add test cases.
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			o := tt.state()
+			got, err := o.AddResource(tt.args.name, tt.args.inline, tt.args.uri)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("DevfileState.AddResource() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" {
+				t.Errorf("DevfileState.AddResource() mismatch (-want +got):\n%s", diff)
+			}
+			if diff := cmp.Diff(tt.want, got); diff != "" {
+				t.Errorf("DevfileState.AddResource() mismatch (-want +got):\n%s", diff)
+			}
+		})
+	}
+}
+
+func TestDevfileState_DeleteResource(t *testing.T) {
+	type args struct {
+		name string
+	}
+	tests := []struct {
+		name    string
+		state   func(t *testing.T) DevfileState
+		args    args
+		want    DevfileContent
+		wantErr bool
+	}{
+		{
+			name: "Delete an existing resource",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddResource(
+					"a-name",
+					"",
+					"an-uri",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "a-name",
+			},
+			want: DevfileContent{
+				Content: `metadata: {}
+schemaVersion: 2.2.0
+`,
+				Commands:   []Command{},
+				Containers: []Container{},
+				Images:     []Image{},
+				Resources:  []Resource{},
+				Events:     Events{},
+			},
+		},
+		{
+			name: "Delete a non existing resource",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddResource(
+					"a-name",
+					"",
+					"an-uri",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "another-name",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},
+		// TODO: Add test cases.
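+		{
+			// Suggested additional case, based on checkResourceUsed in
+			// components.go: deleting a Kubernetes resource referenced by an apply
+			// command is expected to fail.
+			name: "Delete a resource used by an apply command",
+			state: func(t *testing.T) DevfileState {
+				state := NewDevfileState()
+				_, err := state.AddResource(
+					"a-name",
+					"",
+					"an-uri",
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+				_, err = state.AddApplyCommand("an-apply-command", "a-name")
+				if err != nil {
+					t.Fatal(err)
+				}
+				return state
+			},
+			args: args{
+				name: "a-name",
+			},
+			want:    DevfileContent{},
+			wantErr: true,
+		},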
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.DeleteResource(tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.DeleteResource() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want.Content, got.Content); diff != "" { + t.Errorf("DevfileState.DeleteResource() mismatch (-want +got):\n%s", diff) + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.DeleteResource() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/apiserver-impl/devstate/content.go b/pkg/apiserver-impl/devstate/content.go new file mode 100644 index 00000000000..2b64e89a92a --- /dev/null +++ b/pkg/apiserver-impl/devstate/content.go @@ -0,0 +1,314 @@ +package devstate + +import ( + "errors" + "fmt" + "strings" + + "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" + "github.com/devfile/api/v2/pkg/devfile" + "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" + . "github.com/redhat-developer/odo/pkg/apiserver-gen/go" + "k8s.io/utils/pointer" +) + +const ( + SEPARATOR = "," +) + +/* +type DevfileContent struct { + Content string `json:"content"` + Commands []Command `json:"commands"` + Containers []Container `json:"containers"` + Images []Image `json:"images"` + Resources []Resource `json:"resources"` + Events Events `json:"events"` + Metadata Metadata `json:"metadata"` +} + +type Metadata struct { + Name string `json:"name"` + Version string `json:"version"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Tags string `json:"tags"` + Architectures string `json:"architectures"` + Icon string `json:"icon"` + GlobalMemoryLimit string `json:"globalMemoryLimit"` + ProjectType string `json:"projectType"` + Language string `json:"language"` + Website string `json:"website"` + Provider string `json:"provider"` + SupportUrl string `json:"supportUrl"` +} + +type Command struct { + Name string `json:"name"` + Group string `json:"group"` + Default bool `json:"default"` + Type string `json:"type"` + Exec *ExecCommand `json:"exec"` + Apply *ApplyCommand `json:"apply"` + Image *ImageCommand `json:"image"` + Composite *CompositeCommand `json:"composite"` +} + +type ExecCommand struct { + Component string `json:"component"` + CommandLine string `json:"commandLine"` + WorkingDir string `json:"workingDir"` + HotReloadCapable bool `json:"hotReloadCapable"` +} + +type ApplyCommand struct { + Component string `json:"component"` +} + +type ImageCommand struct { + Component string `json:"component"` +} + +type CompositeCommand struct { + Commands []string `json:"commands"` + Parallel bool `json:"parallel"` +} + +type Container struct { + Name string `json:"name"` + Image string `json:"image"` + Command []string `json:"command"` + Args []string `json:"args"` + MemoryRequest string `json:"memoryRequest"` + MemoryLimit string `json:"memoryLimit"` + CpuRequest string `json:"cpuRequest"` + CpuLimit string `json:"cpuLimit"` +} + +type Image struct { + Name string `json:"name"` + ImageName string `json:"imageName"` + Args []string `json:"args"` + BuildContext string `json:"buildContext"` + RootRequired bool `json:"rootRequired"` + URI string `json:"uri"` +} + +type Resource struct { + Name string `json:"name"` + Inlined string `json:"inlined"` + URI string `json:"uri"` +} + +type Events struct { + PreStart []string `json:"preStart"` + PostStart []string `json:"postStart"` + PreStop []string `json:"preStop"` + PostStop []string `json:"postStop"` +} 
+*/ +// getContent returns the YAML content of the global devfile as string +func (o *DevfileState) GetContent() (DevfileContent, error) { + err := o.Devfile.WriteYamlDevfile() + if err != nil { + return DevfileContent{}, errors.New("error writing file") + } + result, err := o.FS.ReadFile("/devfile.yaml") + if err != nil { + return DevfileContent{}, errors.New("error reading file") + } + + commands, err := o.getCommands() + if err != nil { + return DevfileContent{}, fmt.Errorf("error getting commands: %w", err) + } + + containers, err := o.getContainers() + if err != nil { + return DevfileContent{}, errors.New("error getting containers") + } + images, err := o.getImages() + if err != nil { + return DevfileContent{}, errors.New("error getting images") + } + + resources, err := o.getResources() + if err != nil { + return DevfileContent{}, errors.New("error getting Kubernetes resources") + } + + return DevfileContent{ + Content: string(result), + Commands: commands, + Containers: containers, + Images: images, + Resources: resources, + Events: o.getEvents(), + Metadata: o.getMetadata(), + }, nil +} + +func (o *DevfileState) getMetadata() Metadata { + metadata := o.Devfile.Data.GetMetadata() + return Metadata{ + Name: metadata.Name, + Version: metadata.Version, + DisplayName: metadata.DisplayName, + Description: metadata.Description, + Tags: strings.Join(metadata.Tags, SEPARATOR), + Architectures: joinArchitectures(metadata.Architectures), + Icon: metadata.Icon, + GlobalMemoryLimit: metadata.GlobalMemoryLimit, + ProjectType: metadata.ProjectType, + Language: metadata.Language, + Website: metadata.Website, + Provider: metadata.Provider, + SupportUrl: metadata.SupportUrl, + } +} + +func joinArchitectures(architectures []devfile.Architecture) string { + strArchs := make([]string, len(architectures)) + for i, arch := range architectures { + strArchs[i] = string(arch) + } + return strings.Join(strArchs, SEPARATOR) +} + +func (o *DevfileState) getCommands() ([]Command, error) { + commands, err := o.Devfile.Data.GetCommands(common.DevfileOptions{}) + if err != nil { + return nil, err + } + result := make([]Command, 0, len(commands)) + for _, command := range commands { + newCommand := Command{ + Name: command.Id, + Group: GetGroup(command), + Default: GetDefault(command), + } + + if command.Exec != nil { + newCommand.Type = "exec" + newCommand.Exec = ExecCommand{ + Component: command.Exec.Component, + CommandLine: command.Exec.CommandLine, + WorkingDir: command.Exec.WorkingDir, + HotReloadCapable: pointer.BoolDeref(command.Exec.HotReloadCapable, false), + } + } + + if command.Apply != nil { + components, err := o.Devfile.Data.GetComponents(common.DevfileOptions{ + FilterByName: command.Apply.Component, + }) + if err != nil { + return nil, err + } + if len(components) == 0 { + return nil, fmt.Errorf("component %q not found", command.Apply.Component) + } + component := components[0] + if component.Kubernetes != nil || component.Openshift != nil { + newCommand.Type = "apply" + newCommand.Apply = ApplyCommand{ + Component: command.Apply.Component, + } + } + if component.Image != nil { + newCommand.Type = "image" + newCommand.Image = ImageCommand{ + Component: command.Apply.Component, + } + } + } + + if command.Composite != nil { + newCommand.Type = "composite" + newCommand.Composite = CompositeCommand{ + Commands: command.Composite.Commands, + Parallel: pointer.BoolDeref(command.Composite.Parallel, false), + } + } + result = append(result, newCommand) + } + return result, nil +} + +func (o *DevfileState) 
getContainers() ([]Container, error) { + containers, err := o.Devfile.Data.GetComponents(common.DevfileOptions{ + ComponentOptions: common.ComponentOptions{ + ComponentType: v1alpha2.ContainerComponentType, + }, + }) + if err != nil { + return nil, err + } + result := make([]Container, 0, len(containers)) + for _, container := range containers { + result = append(result, Container{ + Name: container.Name, + Image: container.ComponentUnion.Container.Image, + Command: container.ComponentUnion.Container.Command, + Args: container.ComponentUnion.Container.Args, + MemoryRequest: container.ComponentUnion.Container.MemoryRequest, + MemoryLimit: container.ComponentUnion.Container.MemoryLimit, + CpuRequest: container.ComponentUnion.Container.CpuRequest, + CpuLimit: container.ComponentUnion.Container.CpuLimit, + }) + } + return result, nil +} + +func (o *DevfileState) getImages() ([]Image, error) { + images, err := o.Devfile.Data.GetComponents(common.DevfileOptions{ + ComponentOptions: common.ComponentOptions{ + ComponentType: v1alpha2.ImageComponentType, + }, + }) + if err != nil { + return nil, err + } + result := make([]Image, 0, len(images)) + for _, image := range images { + result = append(result, Image{ + Name: image.Name, + ImageName: image.Image.ImageName, + Args: image.Image.Dockerfile.Args, + BuildContext: image.Image.Dockerfile.BuildContext, + RootRequired: pointer.BoolDeref(image.Image.Dockerfile.RootRequired, false), + Uri: image.Image.Dockerfile.Uri, + }) + } + return result, nil +} + +func (o *DevfileState) getResources() ([]Resource, error) { + resources, err := o.Devfile.Data.GetComponents(common.DevfileOptions{ + ComponentOptions: common.ComponentOptions{ + ComponentType: v1alpha2.KubernetesComponentType, + }, + }) + if err != nil { + return nil, err + } + result := make([]Resource, 0, len(resources)) + for _, resource := range resources { + result = append(result, Resource{ + Name: resource.Name, + Inlined: resource.ComponentUnion.Kubernetes.Inlined, + Uri: resource.ComponentUnion.Kubernetes.Uri, + }) + } + return result, nil +} + +func (o *DevfileState) getEvents() Events { + events := o.Devfile.Data.GetEvents() + return Events{ + PreStart: events.PreStart, + PostStart: events.PostStart, + PreStop: events.PreStop, + PostStop: events.PostStop, + } +} diff --git a/pkg/apiserver-impl/devstate/content_test.go b/pkg/apiserver-impl/devstate/content_test.go new file mode 100644 index 00000000000..608d19c9297 --- /dev/null +++ b/pkg/apiserver-impl/devstate/content_test.go @@ -0,0 +1,44 @@ +package devstate + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + . 
"github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func TestDevfileState_GetContent(t *testing.T) { + tests := []struct { + name string + state func() DevfileState + want DevfileContent + wantErr bool + }{ + { + state: func() DevfileState { + return NewDevfileState() + }, + want: DevfileContent{ + Content: "metadata: {}\nschemaVersion: 2.2.0\n", + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.GetContent() + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.GetContent() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.GetContent() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/pkg/apiserver-impl/devstate/events.go b/pkg/apiserver-impl/devstate/events.go new file mode 100644 index 00000000000..407290c5deb --- /dev/null +++ b/pkg/apiserver-impl/devstate/events.go @@ -0,0 +1,19 @@ +package devstate + +import ( + . "github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func (o *DevfileState) UpdateEvents(event string, commands []string) (DevfileContent, error) { + switch event { + case "postStart": + o.Devfile.Data.UpdateEvents(commands, nil, nil, nil) + case "postStop": + o.Devfile.Data.UpdateEvents(nil, commands, nil, nil) + case "preStart": + o.Devfile.Data.UpdateEvents(nil, nil, commands, nil) + case "preStop": + o.Devfile.Data.UpdateEvents(nil, nil, nil, commands) + } + return o.GetContent() +} diff --git a/pkg/apiserver-impl/devstate/events_test.go b/pkg/apiserver-impl/devstate/events_test.go new file mode 100644 index 00000000000..cb74761ac34 --- /dev/null +++ b/pkg/apiserver-impl/devstate/events_test.go @@ -0,0 +1,94 @@ +package devstate + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + . "github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func TestDevfileState_UpdateEvents(t *testing.T) { + type args struct { + event string + commands []string + } + tests := []struct { + name string + state func(t *testing.T) DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "set preStart event", + state: func(t *testing.T) DevfileState { + return NewDevfileState() + }, + args: args{ + event: "preStart", + commands: []string{"command1"}, + }, + want: DevfileContent{ + Content: `events: + preStart: + - command1 +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{ + PreStart: []string{"command1"}, + }, + }, + }, { + name: "set postStart event when preStart is already set", + state: func(t *testing.T) DevfileState { + state := NewDevfileState() + _, err := state.UpdateEvents("preStart", []string{"command1"}) + if err != nil { + t.Fatal(err) + } + return state + }, + args: args{ + event: "postStart", + commands: []string{"command2"}, + }, + want: DevfileContent{ + Content: `events: + postStart: + - command2 + preStart: + - command1 +metadata: {} +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Events: Events{ + PreStart: []string{"command1"}, + PostStart: []string{"command2"}, + }, + }, + }, + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state(t) + got, err := o.UpdateEvents(tt.args.event, tt.args.commands) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.UpdateEvents() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.UpdateEvents() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ui/wasm/pkg/utils/groups.go b/pkg/apiserver-impl/devstate/groups.go similarity index 92% rename from ui/wasm/pkg/utils/groups.go rename to pkg/apiserver-impl/devstate/groups.go index bd6b44f1d55..2072cfb34a3 100644 --- a/ui/wasm/pkg/utils/groups.go +++ b/pkg/apiserver-impl/devstate/groups.go @@ -1,4 +1,4 @@ -package utils +package devstate import ( "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" @@ -73,21 +73,21 @@ func SetDefault(command *v1alpha2.Command, def bool) { if command.Exec.Group == nil { command.Exec.Group = &v1alpha2.CommandGroup{} } - command.Exec.Group.IsDefault = pointer.BoolPtr(def) + command.Exec.Group.IsDefault = pointer.Bool(def) return } if command.Apply != nil { if command.Apply.Group == nil { command.Apply.Group = &v1alpha2.CommandGroup{} } - command.Apply.Group.IsDefault = pointer.BoolPtr(def) + command.Apply.Group.IsDefault = pointer.Bool(def) return } if command.Composite != nil { if command.Composite.Group == nil { command.Composite.Group = &v1alpha2.CommandGroup{} } - command.Composite.Group.IsDefault = pointer.BoolPtr(def) + command.Composite.Group.IsDefault = pointer.Bool(def) return } } diff --git a/pkg/apiserver-impl/devstate/quantity.go b/pkg/apiserver-impl/devstate/quantity.go new file mode 100644 index 00000000000..7e4d6ca9c21 --- /dev/null +++ b/pkg/apiserver-impl/devstate/quantity.go @@ -0,0 +1,8 @@ +package devstate + +import "k8s.io/apimachinery/pkg/api/resource" + +func IsQuantityValid(quantity string) bool { + _, err := resource.ParseQuantity(quantity) + return err == nil +} diff --git a/pkg/apiserver-impl/devstate/state.go b/pkg/apiserver-impl/devstate/state.go new file mode 100644 index 00000000000..ed11b5d8bd9 --- /dev/null +++ b/pkg/apiserver-impl/devstate/state.go @@ -0,0 +1,102 @@ +package devstate + +import ( + "fmt" + "strings" + + apidevfile "github.com/devfile/api/v2/pkg/devfile" + "github.com/devfile/library/v2/pkg/devfile" + "github.com/devfile/library/v2/pkg/devfile/parser" + context "github.com/devfile/library/v2/pkg/devfile/parser/context" + "github.com/devfile/library/v2/pkg/testingutil/filesystem" + . 
"github.com/redhat-developer/odo/pkg/apiserver-gen/go" + + "k8s.io/utils/pointer" +) + +type DevfileState struct { + Devfile parser.DevfileObj + FS filesystem.Filesystem +} + +func NewDevfileState() DevfileState { + s := DevfileState{ + FS: filesystem.NewFakeFs(), + } + // this should never fail, as the parameters are constant + _, _ = s.SetDevfileContent(`schemaVersion: 2.2.0`) + return s +} + +// SetDevfileContent replaces the devfile with a new content +// If an error occurs, the Devfile is not modified +func (o *DevfileState) SetDevfileContent(content string) (DevfileContent, error) { + parserArgs := parser.ParserArgs{ + Data: []byte(content), + ConvertKubernetesContentInUri: pointer.Bool(false), + } + var err error + devfile, _, err := devfile.ParseDevfileAndValidate(parserArgs) + if err != nil { + return DevfileContent{}, fmt.Errorf("error parsing devfile YAML: %w", err) + } + o.Devfile = devfile + o.Devfile.Ctx = context.FakeContext(o.FS, "/devfile.yaml") + return o.GetContent() +} + +func (o *DevfileState) SetMetadata( + name string, + version string, + displayName string, + description string, + tags string, + architectures string, + icon string, + globalMemoryLimit string, + projectType string, + language string, + website string, + provider string, + supportUrl string, +) (DevfileContent, error) { + o.Devfile.Data.SetMetadata(apidevfile.DevfileMetadata{ + Name: name, + Version: version, + DisplayName: displayName, + Description: description, + Tags: splitTags(tags), + Architectures: splitArchitectures(architectures), + Icon: icon, + GlobalMemoryLimit: globalMemoryLimit, + ProjectType: projectType, + Language: language, + Website: website, + Provider: provider, + SupportUrl: supportUrl, + }) + return o.GetContent() +} +func splitArchitectures(architectures string) []apidevfile.Architecture { + if architectures == "" { + return nil + } + parts := strings.Split(architectures, SEPARATOR) + result := make([]apidevfile.Architecture, len(parts)) + for i, arch := range parts { + result[i] = apidevfile.Architecture(strings.Trim(arch, " ")) + } + return result +} + +func splitTags(tags string) []string { + if tags == "" { + return nil + } + parts := strings.Split(tags, SEPARATOR) + result := make([]string, len(parts)) + for i, tag := range parts { + result[i] = strings.Trim(tag, " ") + } + return result +} diff --git a/pkg/apiserver-impl/devstate/state_test.go b/pkg/apiserver-impl/devstate/state_test.go new file mode 100644 index 00000000000..ba201cbd2fd --- /dev/null +++ b/pkg/apiserver-impl/devstate/state_test.go @@ -0,0 +1,110 @@ +package devstate + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + . 
"github.com/redhat-developer/odo/pkg/apiserver-gen/go" +) + +func TestDevfileState_SetMetadata(t *testing.T) { + type args struct { + name string + version string + displayName string + description string + tags string + architectures string + icon string + globalMemoryLimit string + projectType string + language string + website string + provider string + supportUrl string + } + tests := []struct { + name string + state func() DevfileState + args args + want DevfileContent + wantErr bool + }{ + { + name: "Set metadata", + state: func() DevfileState { + return NewDevfileState() + }, + args: args{ + name: "a-name", + version: "v1.1.1", + displayName: "a display name", + description: "a description", + tags: "tag1,tag2", + architectures: "arch1,arch2", + icon: "an.ico", + globalMemoryLimit: "1Gi", + projectType: "a project type", + language: "a language", + website: "http://example.com", + provider: "a provider", + supportUrl: "http://support.example.com", + }, + want: DevfileContent{ + Content: `metadata: + architectures: + - arch1 + - arch2 + description: a description + displayName: a display name + globalMemoryLimit: 1Gi + icon: an.ico + language: a language + name: a-name + projectType: a project type + provider: a provider + supportUrl: http://support.example.com + tags: + - tag1 + - tag2 + version: v1.1.1 + website: http://example.com +schemaVersion: 2.2.0 +`, + Commands: []Command{}, + Containers: []Container{}, + Images: []Image{}, + Resources: []Resource{}, + Metadata: Metadata{ + Name: "a-name", + Version: "v1.1.1", + DisplayName: "a display name", + Description: "a description", + Tags: "tag1,tag2", + Architectures: "arch1,arch2", + Icon: "an.ico", + GlobalMemoryLimit: "1Gi", + ProjectType: "a project type", + Language: "a language", + Website: "http://example.com", + Provider: "a provider", + SupportUrl: "http://support.example.com", + }, + }, + }, + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := tt.state() + got, err := o.SetMetadata(tt.args.name, tt.args.version, tt.args.displayName, tt.args.description, tt.args.tags, tt.args.architectures, tt.args.icon, tt.args.globalMemoryLimit, tt.args.projectType, tt.args.language, tt.args.website, tt.args.provider, tt.args.supportUrl) + if (err != nil) != tt.wantErr { + t.Errorf("DevfileState.SetMetadata() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := cmp.Diff(tt.want, got); diff != "" { + t.Errorf("DevfileState.SetMetadata() mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/ui/Makefile b/ui/Makefile deleted file mode 100644 index 508042ce0c1..00000000000 --- a/ui/Makefile +++ /dev/null @@ -1,12 +0,0 @@ -build-wasm: - ( \ - cd wasm/ && \ - GOOS=js GOARCH=wasm go build -o ../src/assets/devfile.wasm && \ - HASH=$$(md5sum ../src/assets/devfile.wasm | awk '{ print $$1 }') && \ - echo $${HASH} && \ - mv ../src/assets/devfile.wasm ../src/assets/devfile.$${HASH}.wasm && \ - sed -i "s/devfile\.[a-z0-9]*\.wasm/devfile\.$${HASH}.wasm/" ../src/app/app.module.ts \ - ) - -deploy: - npm run deploy diff --git a/ui/angular.json b/ui/angular.json index 73802b41b96..507b049aea5 100644 --- a/ui/angular.json +++ b/ui/angular.json @@ -28,9 +28,6 @@ "styles": [ "@angular/material/prebuilt-themes/indigo-pink.css", "src/styles.css" - ], - "scripts": [ - "src/assets/wasm_exec.js" ] }, "configurations": { @@ -70,7 +67,10 @@ "browserTarget": "devfile-builder:build:development" } }, - "defaultConfiguration": "development" + "defaultConfiguration": "development", + "options": { + "proxyConfig": "src/proxy.conf.json" + }, }, "extract-i18n": { "builder": "@angular-devkit/build-angular:extract-i18n", diff --git a/ui/devfile.yaml b/ui/devfile.yaml index ab4ca579559..bb44962e231 100644 --- a/ui/devfile.yaml +++ b/ui/devfile.yaml @@ -1,13 +1,4 @@ commands: -- composite: - commands: - - go-build - - angular-install - group: - isDefault: true - kind: build - parallel: true - id: build - exec: commandLine: npm run start component: node @@ -17,31 +8,14 @@ commands: hotReloadCapable: true workingDir: ${PROJECT_SOURCE} id: run -- exec: - commandLine: make deploy - component: node - group: - isDefault: true - kind: deploy - hotReloadCapable: false - workingDir: ${PROJECTS_ROOT} - id: deploy -- exec: - commandLine: make build-wasm - component: go - env: - - name: GOPATH - value: ${PROJECT_SOURCE}/.go - - name: GOCACHE - value: ${PROJECT_SOURCE}/.cache - hotReloadCapable: false - workingDir: ${PROJECT_SOURCE} - id: go-build - exec: commandLine: npm install component: node hotReloadCapable: false workingDir: ${PROJECT_SOURCE} + group: + isDefault: true + kind: build id: angular-install components: - container: @@ -58,15 +32,5 @@ components: memoryLimit: 4096Mi mountSources: true name: node -- container: - args: - - tail - - -f - - /dev/null - dedicatedPod: false - image: registry.access.redhat.com/ubi9/go-toolset:1.18.9-14 - memoryLimit: 1024Mi - mountSources: true - name: go metadata: {} schemaVersion: 2.2.0 diff --git a/ui/src/app/api-gen/.gitignore b/ui/src/app/api-gen/.gitignore new file mode 100644 index 00000000000..149b5765472 --- /dev/null +++ b/ui/src/app/api-gen/.gitignore @@ -0,0 +1,4 @@ +wwwroot/*.js +node_modules +typings +dist diff --git a/ui/src/app/api-gen/.openapi-generator-ignore b/ui/src/app/api-gen/.openapi-generator-ignore new file mode 100644 index 00000000000..7484ee590a3 --- /dev/null +++ b/ui/src/app/api-gen/.openapi-generator-ignore @@ -0,0 +1,23 
@@
+# OpenAPI Generator Ignore
+# Generated by openapi-generator https://github.com/openapitools/openapi-generator
+
+# Use this file to prevent files from being overwritten by the generator.
+# The patterns follow closely to .gitignore or .dockerignore.
+
+# As an example, the C# client generator defines ApiClient.cs.
+# You can make changes and tell OpenAPI Generator to ignore just this file by uncommenting the following line:
+#ApiClient.cs
+
+# You can match any string of characters against a directory, file or extension with a single asterisk (*):
+#foo/*/qux
+# The above matches foo/bar/qux and foo/baz/qux, but not foo/bar/baz/qux
+
+# You can recursively match patterns against a directory, file or extension with a double asterisk (**):
+#foo/**/qux
+# This matches foo/bar/qux, foo/baz/qux, and foo/bar/baz/qux
+
+# You can also negate patterns with an exclamation (!).
+# For example, you can ignore all files in a docs folder with the file extension .md:
+#docs/*.md
+# Then explicitly reverse the ignore rule for a single file:
+#!docs/README.md
diff --git a/ui/src/app/api-gen/.openapi-generator/FILES b/ui/src/app/api-gen/.openapi-generator/FILES
new file mode 100644
index 00000000000..df67b7c93bb
--- /dev/null
+++ b/ui/src/app/api-gen/.openapi-generator/FILES
@@ -0,0 +1,40 @@
+.gitignore
+README.md
+api.module.ts
+api/api.ts
+api/default.service.ts
+configuration.ts
+encoder.ts
+git_push.sh
+index.ts
+model/applyCommand.ts
+model/command.ts
+model/componentCommandPostRequest.ts
+model/componentGet200Response.ts
+model/compositeCommand.ts
+model/container.ts
+model/devfileContent.ts
+model/devstateApplyCommandPostRequest.ts
+model/devstateChartGet200Response.ts
+model/devstateCommandCommandNameMovePostRequest.ts
+model/devstateCommandCommandNameSetDefaultPostRequest.ts
+model/devstateCompositeCommandPostRequest.ts
+model/devstateContainerPostRequest.ts
+model/devstateDevfilePutRequest.ts
+model/devstateEventsPutRequest.ts
+model/devstateExecCommandPostRequest.ts
+model/devstateImagePostRequest.ts
+model/devstateQuantityValidPostRequest.ts
+model/devstateResourcePostRequest.ts
+model/events.ts
+model/execCommand.ts
+model/generalError.ts
+model/generalSuccess.ts
+model/image.ts
+model/imageCommand.ts
+model/instanceGet200Response.ts
+model/metadata.ts
+model/models.ts
+model/resource.ts
+param.ts
+variables.ts
diff --git a/ui/src/app/api-gen/.openapi-generator/VERSION b/ui/src/app/api-gen/.openapi-generator/VERSION
new file mode 100644
index 00000000000..cd802a1ec4e
--- /dev/null
+++ b/ui/src/app/api-gen/.openapi-generator/VERSION
@@ -0,0 +1 @@
+6.6.0
\ No newline at end of file
diff --git a/ui/src/app/api-gen/README.md b/ui/src/app/api-gen/README.md
new file mode 100644
index 00000000000..de16f95a8bd
--- /dev/null
+++ b/ui/src/app/api-gen/README.md
@@ -0,0 +1,226 @@
+## @
+
+### Building
+
+To install the required dependencies and to build the typescript sources run:
+```
+npm install
+npm run build
+```
+
+### publishing
+
+First build the package, then run ```npm publish dist``` (don't forget to specify the `dist` folder!)
+
+### consuming
+
+Navigate to the folder of your consuming project and run one of the following commands.
+
+_published:_
+
+```
+npm install @ --save
+```
+
+_without publishing (not recommended):_
+
+```
+npm install PATH_TO_GENERATED_PACKAGE/dist.tgz --save
+```
+
+_It's important to take the tgz file, otherwise you'll get trouble with links on Windows_
+
+_using `npm link`:_
+
+In PATH_TO_GENERATED_PACKAGE/dist:
+```
+npm link
+```
+
+In your project:
+```
+npm link
+```
+
+__Note for Windows users:__ The Angular CLI has trouble using linked npm packages.
+Please refer to this issue https://github.com/angular/angular-cli/issues/8284 for a solution / workaround.
+Published packages are not affected by this issue.
+
+
+#### General usage
+
+In your Angular project:
+
+
+```
+// without configuring providers
+import { ApiModule } from '';
+import { HttpClientModule } from '@angular/common/http';
+
+@NgModule({
+    imports: [
+        ApiModule,
+        // make sure to import the HttpClientModule in the AppModule only,
+        // see https://github.com/angular/angular/issues/20575
+        HttpClientModule
+    ],
+    declarations: [ AppComponent ],
+    providers: [],
+    bootstrap: [ AppComponent ]
+})
+export class AppModule {}
+```
+
+```
+// configuring providers
+import { ApiModule, Configuration, ConfigurationParameters } from '';
+
+export function apiConfigFactory (): Configuration {
+  const params: ConfigurationParameters = {
+    // set configuration parameters here.
+  }
+  return new Configuration(params);
+}
+
+@NgModule({
+    imports: [ ApiModule.forRoot(apiConfigFactory) ],
+    declarations: [ AppComponent ],
+    providers: [],
+    bootstrap: [ AppComponent ]
+})
+export class AppModule {}
+```
+
+```
+// configuring providers with an authentication service that manages your access tokens
+import { ApiModule, Configuration } from '';
+
+@NgModule({
+    imports: [ ApiModule ],
+    declarations: [ AppComponent ],
+    providers: [
+      {
+        provide: Configuration,
+        useFactory: (authService: AuthService) => new Configuration(
+          {
+            basePath: environment.apiUrl,
+            accessToken: authService.getAccessToken.bind(authService)
+          }
+        ),
+        deps: [AuthService],
+        multi: false
+      }
+    ],
+    bootstrap: [ AppComponent ]
+})
+export class AppModule {}
+```
+
+```
+import { DefaultApi } from '';
+
+export class AppComponent {
+    constructor(private apiGateway: DefaultApi) { }
+}
+```
+
+Note: The ApiModule is restricted to being instantiated once app-wide.
+This is to ensure that all services are treated as singletons.
+
+#### Using multiple OpenAPI files / APIs / ApiModules
+In order to use multiple `ApiModules` generated from different OpenAPI files,
+you can create an alias name when importing the modules
+to avoid naming conflicts:
+```
+import { ApiModule } from 'my-api-path';
+import { ApiModule as OtherApiModule } from 'my-other-api-path';
+import { HttpClientModule } from '@angular/common/http';
+
+@NgModule({
+  imports: [
+    ApiModule,
+    OtherApiModule,
+    // make sure to import the HttpClientModule in the AppModule only,
+    // see https://github.com/angular/angular/issues/20575
+    HttpClientModule
+  ]
+})
+export class AppModule {
+
+}
+```
+
+
+### Set service base path
+If different from the generated base path, you can provide the base path to your service during app bootstrap.
+ +``` +import { BASE_PATH } from ''; + +bootstrap(AppComponent, [ + { provide: BASE_PATH, useValue: 'https://your-web-service.com' }, +]); +``` +or + +``` +import { BASE_PATH } from ''; + +@NgModule({ + imports: [], + declarations: [ AppComponent ], + providers: [ provide: BASE_PATH, useValue: 'https://your-web-service.com' ], + bootstrap: [ AppComponent ] +}) +export class AppModule {} +``` + + +#### Using @angular/cli +First extend your `src/environments/*.ts` files by adding the corresponding base path: + +``` +export const environment = { + production: false, + API_BASE_PATH: 'http://127.0.0.1:8080' +}; +``` + +In the src/app/app.module.ts: +``` +import { BASE_PATH } from ''; +import { environment } from '../environments/environment'; + +@NgModule({ + declarations: [ + AppComponent + ], + imports: [ ], + providers: [{ provide: BASE_PATH, useValue: environment.API_BASE_PATH }], + bootstrap: [ AppComponent ] +}) +export class AppModule { } +``` + +### Customizing path parameter encoding + +Without further customization, only [path-parameters][parameter-locations-url] of [style][style-values-url] 'simple' +and Dates for format 'date-time' are encoded correctly. + +Other styles (e.g. "matrix") are not that easy to encode +and thus are best delegated to other libraries (e.g.: [@honoluluhenk/http-param-expander]). + +To implement your own parameter encoding (or call another library), +pass an arrow-function or method-reference to the `encodeParam` property of the Configuration-object +(see [General Usage](#general-usage) above). + +Example value for use in your Configuration-Provider: +```typescript +new Configuration({ + encodeParam: (param: Param) => myFancyParamEncoder(param), +}) +``` + +[parameter-locations-url]: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#parameter-locations +[style-values-url]: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#style-values +[@honoluluhenk/http-param-expander]: https://www.npmjs.com/package/@honoluluhenk/http-param-expander diff --git a/ui/src/app/api-gen/api.module.ts b/ui/src/app/api-gen/api.module.ts new file mode 100644 index 00000000000..58d341fbd2e --- /dev/null +++ b/ui/src/app/api-gen/api.module.ts @@ -0,0 +1,30 @@ +import { NgModule, ModuleWithProviders, SkipSelf, Optional } from '@angular/core'; +import { Configuration } from './configuration'; +import { HttpClient } from '@angular/common/http'; + + +@NgModule({ + imports: [], + declarations: [], + exports: [], + providers: [] +}) +export class ApiModule { + public static forRoot(configurationFactory: () => Configuration): ModuleWithProviders { + return { + ngModule: ApiModule, + providers: [ { provide: Configuration, useFactory: configurationFactory } ] + }; + } + + constructor( @Optional() @SkipSelf() parentModule: ApiModule, + @Optional() http: HttpClient) { + if (parentModule) { + throw new Error('ApiModule is already loaded. Import in your base AppModule only.'); + } + if (!http) { + throw new Error('You need to import the HttpClientModule in your AppModule! 
\n' + + 'See also https://github.com/angular/angular/issues/20575'); + } + } +} diff --git a/ui/src/app/api-gen/api/api.ts b/ui/src/app/api-gen/api/api.ts new file mode 100644 index 00000000000..8e76619647f --- /dev/null +++ b/ui/src/app/api-gen/api/api.ts @@ -0,0 +1,3 @@ +export * from './default.service'; +import { DefaultService } from './default.service'; +export const APIS = [DefaultService]; diff --git a/ui/src/app/api-gen/api/default.service.ts b/ui/src/app/api-gen/api/default.service.ts new file mode 100644 index 00000000000..df518b77d54 --- /dev/null +++ b/ui/src/app/api-gen/api/default.service.ts @@ -0,0 +1,1597 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ +/* tslint:disable:no-unused-variable member-ordering */ + +import { Inject, Injectable, Optional } from '@angular/core'; +import { HttpClient, HttpHeaders, HttpParams, + HttpResponse, HttpEvent, HttpParameterCodec, HttpContext + } from '@angular/common/http'; +import { CustomHttpParameterCodec } from '../encoder'; +import { Observable } from 'rxjs'; + +// @ts-ignore +import { ComponentCommandPostRequest } from '../model/componentCommandPostRequest'; +// @ts-ignore +import { ComponentGet200Response } from '../model/componentGet200Response'; +// @ts-ignore +import { DevfileContent } from '../model/devfileContent'; +// @ts-ignore +import { DevstateApplyCommandPostRequest } from '../model/devstateApplyCommandPostRequest'; +// @ts-ignore +import { DevstateChartGet200Response } from '../model/devstateChartGet200Response'; +// @ts-ignore +import { DevstateCommandCommandNameMovePostRequest } from '../model/devstateCommandCommandNameMovePostRequest'; +// @ts-ignore +import { DevstateCommandCommandNameSetDefaultPostRequest } from '../model/devstateCommandCommandNameSetDefaultPostRequest'; +// @ts-ignore +import { DevstateCompositeCommandPostRequest } from '../model/devstateCompositeCommandPostRequest'; +// @ts-ignore +import { DevstateContainerPostRequest } from '../model/devstateContainerPostRequest'; +// @ts-ignore +import { DevstateDevfilePutRequest } from '../model/devstateDevfilePutRequest'; +// @ts-ignore +import { DevstateEventsPutRequest } from '../model/devstateEventsPutRequest'; +// @ts-ignore +import { DevstateExecCommandPostRequest } from '../model/devstateExecCommandPostRequest'; +// @ts-ignore +import { DevstateImagePostRequest } from '../model/devstateImagePostRequest'; +// @ts-ignore +import { DevstateQuantityValidPostRequest } from '../model/devstateQuantityValidPostRequest'; +// @ts-ignore +import { DevstateResourcePostRequest } from '../model/devstateResourcePostRequest'; +// @ts-ignore +import { GeneralError } from '../model/generalError'; +// @ts-ignore +import { GeneralSuccess } from '../model/generalSuccess'; +// @ts-ignore +import { InstanceGet200Response } from '../model/instanceGet200Response'; +// @ts-ignore +import { Metadata } from '../model/metadata'; + +// @ts-ignore +import { BASE_PATH, COLLECTION_FORMATS } from '../variables'; +import { Configuration } from '../configuration'; + + + +@Injectable({ + providedIn: 'root' +}) +export class DefaultService { + + protected basePath = '/api/v1'; + public defaultHeaders = new HttpHeaders(); + public configuration = new Configuration(); + public encoder: HttpParameterCodec; + + constructor(protected httpClient: HttpClient, 
@Optional()@Inject(BASE_PATH) basePath: string|string[], @Optional() configuration: Configuration) { + if (configuration) { + this.configuration = configuration; + } + if (typeof this.configuration.basePath !== 'string') { + if (Array.isArray(basePath) && basePath.length > 0) { + basePath = basePath[0]; + } + + if (typeof basePath !== 'string') { + basePath = this.basePath; + } + this.configuration.basePath = basePath; + } + this.encoder = this.configuration.encoder || new CustomHttpParameterCodec(); + } + + + // @ts-ignore + private addToHttpParams(httpParams: HttpParams, value: any, key?: string): HttpParams { + if (typeof value === "object" && value instanceof Date === false) { + httpParams = this.addToHttpParamsRecursive(httpParams, value); + } else { + httpParams = this.addToHttpParamsRecursive(httpParams, value, key); + } + return httpParams; + } + + private addToHttpParamsRecursive(httpParams: HttpParams, value?: any, key?: string): HttpParams { + if (value == null) { + return httpParams; + } + + if (typeof value === "object") { + if (Array.isArray(value)) { + (value as any[]).forEach( elem => httpParams = this.addToHttpParamsRecursive(httpParams, elem, key)); + } else if (value instanceof Date) { + if (key != null) { + httpParams = httpParams.append(key, (value as Date).toISOString().substr(0, 10)); + } else { + throw Error("key may not be null if value is Date"); + } + } else { + Object.keys(value).forEach( k => httpParams = this.addToHttpParamsRecursive( + httpParams, value[k], key != null ? `${key}.${k}` : k)); + } + } else if (key != null) { + httpParams = httpParams.append(key, value); + } else { + throw Error("key may not be null if value is not object or array"); + } + return httpParams; + } + + /** + * Instruct \'odo dev\' to perform given command on the component + * @param componentCommandPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public componentCommandPost(componentCommandPostRequest?: ComponentCommandPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public componentCommandPost(componentCommandPostRequest?: ComponentCommandPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public componentCommandPost(componentCommandPostRequest?: ComponentCommandPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public componentCommandPost(componentCommandPostRequest?: ComponentCommandPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/component/command`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: componentCommandPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Get the Information about the component controlled by this \'odo dev\' instance. + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public componentGet(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public componentGet(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public componentGet(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public componentGet(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/component`; + return this.httpClient.request('get', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new Apply Command to the Devfile + * @param devstateApplyCommandPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateApplyCommandPost(devstateApplyCommandPostRequest?: DevstateApplyCommandPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateApplyCommandPost(devstateApplyCommandPostRequest?: DevstateApplyCommandPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateApplyCommandPost(devstateApplyCommandPostRequest?: DevstateApplyCommandPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateApplyCommandPost(devstateApplyCommandPostRequest?: DevstateApplyCommandPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/applyCommand`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateApplyCommandPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Get chart representing the Devfile cycle in mermaid format + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateChartGet(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateChartGet(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateChartGet(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateChartGet(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/chart`; + return this.httpClient.request('get', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Delete a command from the Devfile + * @param commandName Command name to delete + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateCommandCommandNameDelete(commandName: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateCommandCommandNameDelete(commandName: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameDelete(commandName: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameDelete(commandName: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (commandName === null || commandName === undefined) { + throw new Error('Required parameter commandName was null or undefined when calling devstateCommandCommandNameDelete.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/command/${this.configuration.encodeParam({name: "commandName", value: commandName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Move a command + * @param commandName Command name to move + * @param devstateCommandCommandNameMovePostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateCommandCommandNameMovePost(commandName: string, devstateCommandCommandNameMovePostRequest?: DevstateCommandCommandNameMovePostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateCommandCommandNameMovePost(commandName: string, devstateCommandCommandNameMovePostRequest?: DevstateCommandCommandNameMovePostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameMovePost(commandName: string, devstateCommandCommandNameMovePostRequest?: DevstateCommandCommandNameMovePostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameMovePost(commandName: string, devstateCommandCommandNameMovePostRequest?: DevstateCommandCommandNameMovePostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (commandName === null || commandName === undefined) { + throw new Error('Required parameter commandName was null or undefined when calling devstateCommandCommandNameMovePost.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/command/${this.configuration.encodeParam({name: "commandName", value: commandName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}/move`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateCommandCommandNameMovePostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Set a command as default command for a group + * @param commandName Command name to set as default + * @param devstateCommandCommandNameSetDefaultPostRequest + * @param observe set whether or not to return the data 
Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. + */ + public devstateCommandCommandNameSetDefaultPost(commandName: string, devstateCommandCommandNameSetDefaultPostRequest?: DevstateCommandCommandNameSetDefaultPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateCommandCommandNameSetDefaultPost(commandName: string, devstateCommandCommandNameSetDefaultPostRequest?: DevstateCommandCommandNameSetDefaultPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameSetDefaultPost(commandName: string, devstateCommandCommandNameSetDefaultPostRequest?: DevstateCommandCommandNameSetDefaultPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameSetDefaultPost(commandName: string, devstateCommandCommandNameSetDefaultPostRequest?: DevstateCommandCommandNameSetDefaultPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (commandName === null || commandName === undefined) { + throw new Error('Required parameter commandName was null or undefined when calling devstateCommandCommandNameSetDefaultPost.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/command/${this.configuration.encodeParam({name: "commandName", value: commandName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}/setDefault`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateCommandCommandNameSetDefaultPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); 
+ } + + /** + * Unset a command as default command for its group + * @param commandName Command name to set as default + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. + */ + public devstateCommandCommandNameUnsetDefaultPost(commandName: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateCommandCommandNameUnsetDefaultPost(commandName: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameUnsetDefaultPost(commandName: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCommandCommandNameUnsetDefaultPost(commandName: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (commandName === null || commandName === undefined) { + throw new Error('Required parameter commandName was null or undefined when calling devstateCommandCommandNameUnsetDefaultPost.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/command/${this.configuration.encodeParam({name: "commandName", value: commandName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}/unsetDefault`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new Composite Command to the Devfile + * @param devstateCompositeCommandPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateCompositeCommandPost(devstateCompositeCommandPostRequest?: DevstateCompositeCommandPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateCompositeCommandPost(devstateCompositeCommandPostRequest?: DevstateCompositeCommandPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCompositeCommandPost(devstateCompositeCommandPostRequest?: DevstateCompositeCommandPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateCompositeCommandPost(devstateCompositeCommandPostRequest?: DevstateCompositeCommandPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/compositeCommand`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateCompositeCommandPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Delete a container from the Devfile + * @param containerName Container name to delete + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateContainerContainerNameDelete(containerName: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateContainerContainerNameDelete(containerName: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateContainerContainerNameDelete(containerName: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateContainerContainerNameDelete(containerName: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (containerName === null || containerName === undefined) { + throw new Error('Required parameter containerName was null or undefined when calling devstateContainerContainerNameDelete.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/container/${this.configuration.encodeParam({name: "containerName", value: containerName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new container to the Devfile + * @param devstateContainerPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateContainerPost(devstateContainerPostRequest?: DevstateContainerPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateContainerPost(devstateContainerPostRequest?: DevstateContainerPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateContainerPost(devstateContainerPostRequest?: DevstateContainerPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateContainerPost(devstateContainerPostRequest?: DevstateContainerPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/container`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateContainerPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Clear the Devfile content + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateDevfileDelete(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateDevfileDelete(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfileDelete(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfileDelete(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/devfile`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Get the complete Devfile content + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateDevfileGet(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateDevfileGet(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfileGet(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfileGet(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/devfile`; + return this.httpClient.request('get', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Updates the complete Devfile content + * @param devstateDevfilePutRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateDevfilePut(devstateDevfilePutRequest?: DevstateDevfilePutRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateDevfilePut(devstateDevfilePutRequest?: DevstateDevfilePutRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfilePut(devstateDevfilePutRequest?: DevstateDevfilePutRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateDevfilePut(devstateDevfilePutRequest?: DevstateDevfilePutRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/devfile`; + return this.httpClient.request('put', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateDevfilePutRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Updates the Events for the Devfile + * @param devstateEventsPutRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateEventsPut(devstateEventsPutRequest?: DevstateEventsPutRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateEventsPut(devstateEventsPutRequest?: DevstateEventsPutRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateEventsPut(devstateEventsPutRequest?: DevstateEventsPutRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateEventsPut(devstateEventsPutRequest?: DevstateEventsPutRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/events`; + return this.httpClient.request('put', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateEventsPutRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new Exec Command to the Devfile + * @param devstateExecCommandPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateExecCommandPost(devstateExecCommandPostRequest?: DevstateExecCommandPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateExecCommandPost(devstateExecCommandPostRequest?: DevstateExecCommandPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateExecCommandPost(devstateExecCommandPostRequest?: DevstateExecCommandPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateExecCommandPost(devstateExecCommandPostRequest?: DevstateExecCommandPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/execCommand`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateExecCommandPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Delete an image from the Devfile + * @param imageName Image name to delete + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateImageImageNameDelete(imageName: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateImageImageNameDelete(imageName: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateImageImageNameDelete(imageName: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateImageImageNameDelete(imageName: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (imageName === null || imageName === undefined) { + throw new Error('Required parameter imageName was null or undefined when calling devstateImageImageNameDelete.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/image/${this.configuration.encodeParam({name: "imageName", value: imageName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new image to the Devfile + * @param devstateImagePostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateImagePost(devstateImagePostRequest?: DevstateImagePostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateImagePost(devstateImagePostRequest?: DevstateImagePostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateImagePost(devstateImagePostRequest?: DevstateImagePostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateImagePost(devstateImagePostRequest?: DevstateImagePostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/image`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateImagePostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Updates the metadata for the Devfile + * @param metadata + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
+ */ + public devstateMetadataPut(metadata?: Metadata, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateMetadataPut(metadata?: Metadata, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateMetadataPut(metadata?: Metadata, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateMetadataPut(metadata?: Metadata, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/metadata`; + return this.httpClient.request('put', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: metadata, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Check if a quantity is valid + * @param devstateQuantityValidPostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
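+     * Illustrative usage (not part of the generated client): devstateQuantityValidPost({quantity: '512Mi'}).subscribe(...); the Observable errors for an invalid Kubernetes quantity.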
+ */ + public devstateQuantityValidPost(devstateQuantityValidPostRequest?: DevstateQuantityValidPostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateQuantityValidPost(devstateQuantityValidPostRequest?: DevstateQuantityValidPostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateQuantityValidPost(devstateQuantityValidPostRequest?: DevstateQuantityValidPostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateQuantityValidPost(devstateQuantityValidPostRequest?: DevstateQuantityValidPostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/quantityValid`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateQuantityValidPostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Add a new Kubernetes resource to the Devfile + * @param devstateResourcePostRequest + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
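+     * Illustrative usage (not part of the generated client): devstateResourcePost({name: 'my-deployment', uri: 'kubernetes/deploy.yaml'}).subscribe(devfile => ...);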
+ */ + public devstateResourcePost(devstateResourcePostRequest?: DevstateResourcePostRequest, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateResourcePost(devstateResourcePostRequest?: DevstateResourcePostRequest, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateResourcePost(devstateResourcePostRequest?: DevstateResourcePostRequest, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateResourcePost(devstateResourcePostRequest?: DevstateResourcePostRequest, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + // to determine the Content-Type header + const consumes: string[] = [ + 'application/json' + ]; + const httpContentTypeSelected: string | undefined = this.configuration.selectHeaderContentType(consumes); + if (httpContentTypeSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Content-Type', httpContentTypeSelected); + } + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/resource`; + return this.httpClient.request('post', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + body: devstateResourcePostRequest, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Delete a resource from the Devfile + * @param resourceName Resource name to delete + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
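+     * Illustrative usage (not part of the generated client): devstateResourceResourceNameDelete('my-deployment').subscribe(devfile => ...);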
+ */ + public devstateResourceResourceNameDelete(resourceName: string, observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public devstateResourceResourceNameDelete(resourceName: string, observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateResourceResourceNameDelete(resourceName: string, observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public devstateResourceResourceNameDelete(resourceName: string, observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + if (resourceName === null || resourceName === undefined) { + throw new Error('Required parameter resourceName was null or undefined when calling devstateResourceResourceNameDelete.'); + } + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/devstate/resource/${this.configuration.encodeParam({name: "resourceName", value: resourceName, in: "path", style: "simple", explode: false, dataType: "string", dataFormat: undefined})}`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Stop this \'odo dev\' instance + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
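+     * Illustrative usage (not part of the generated client): instanceDelete().subscribe(() => console.log('odo dev instance stopped'));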
+ */ + public instanceDelete(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public instanceDelete(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public instanceDelete(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public instanceDelete(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/instance`; + return this.httpClient.request('delete', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + + /** + * Get information about the this \'odo dev\' instance. + * @param observe set whether or not to return the data Observable as the body, response or events. defaults to returning the body. + * @param reportProgress flag to report request and response progress. 
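+     * Illustrative usage (not part of the generated client): instanceGet().subscribe(info => console.log(info.pid, info.componentDirectory));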
+ */ + public instanceGet(observe?: 'body', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable; + public instanceGet(observe?: 'response', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public instanceGet(observe?: 'events', reportProgress?: boolean, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable>; + public instanceGet(observe: any = 'body', reportProgress: boolean = false, options?: {httpHeaderAccept?: 'application/json', context?: HttpContext}): Observable { + + let localVarHeaders = this.defaultHeaders; + + let localVarHttpHeaderAcceptSelected: string | undefined = options && options.httpHeaderAccept; + if (localVarHttpHeaderAcceptSelected === undefined) { + // to determine the Accept header + const httpHeaderAccepts: string[] = [ + 'application/json' + ]; + localVarHttpHeaderAcceptSelected = this.configuration.selectHeaderAccept(httpHeaderAccepts); + } + if (localVarHttpHeaderAcceptSelected !== undefined) { + localVarHeaders = localVarHeaders.set('Accept', localVarHttpHeaderAcceptSelected); + } + + let localVarHttpContext: HttpContext | undefined = options && options.context; + if (localVarHttpContext === undefined) { + localVarHttpContext = new HttpContext(); + } + + + let responseType_: 'text' | 'json' | 'blob' = 'json'; + if (localVarHttpHeaderAcceptSelected) { + if (localVarHttpHeaderAcceptSelected.startsWith('text')) { + responseType_ = 'text'; + } else if (this.configuration.isJsonMime(localVarHttpHeaderAcceptSelected)) { + responseType_ = 'json'; + } else { + responseType_ = 'blob'; + } + } + + let localVarPath = `/instance`; + return this.httpClient.request('get', `${this.configuration.basePath}${localVarPath}`, + { + context: localVarHttpContext, + responseType: responseType_, + withCredentials: this.configuration.withCredentials, + headers: localVarHeaders, + observe: observe, + reportProgress: reportProgress + } + ); + } + +} diff --git a/ui/src/app/api-gen/configuration.ts b/ui/src/app/api-gen/configuration.ts new file mode 100644 index 00000000000..526b454fb2b --- /dev/null +++ b/ui/src/app/api-gen/configuration.ts @@ -0,0 +1,166 @@ +import { HttpParameterCodec } from '@angular/common/http'; +import { Param } from './param'; + +export interface ConfigurationParameters { + /** + * @deprecated Since 5.0. Use credentials instead + */ + apiKeys?: {[ key: string ]: string}; + username?: string; + password?: string; + /** + * @deprecated Since 5.0. Use credentials instead + */ + accessToken?: string | (() => string); + basePath?: string; + withCredentials?: boolean; + /** + * Takes care of encoding query- and form-parameters. + */ + encoder?: HttpParameterCodec; + /** + * Override the default method for encoding path parameters in various + * styles. + *
+     * <p>
+     * See {@link README.md} for more details
+     * </p>
+ */ + encodeParam?: (param: Param) => string; + /** + * The keys are the names in the securitySchemes section of the OpenAPI + * document. They should map to the value used for authentication + * minus any standard prefixes such as 'Basic' or 'Bearer'. + */ + credentials?: {[ key: string ]: string | (() => string | undefined)}; +} + +export class Configuration { + /** + * @deprecated Since 5.0. Use credentials instead + */ + apiKeys?: {[ key: string ]: string}; + username?: string; + password?: string; + /** + * @deprecated Since 5.0. Use credentials instead + */ + accessToken?: string | (() => string); + basePath?: string; + withCredentials?: boolean; + /** + * Takes care of encoding query- and form-parameters. + */ + encoder?: HttpParameterCodec; + /** + * Encoding of various path parameter + * styles. + *
+     * <p>
+     * See {@link README.md} for more details
+     * </p>
+ */ + encodeParam: (param: Param) => string; + /** + * The keys are the names in the securitySchemes section of the OpenAPI + * document. They should map to the value used for authentication + * minus any standard prefixes such as 'Basic' or 'Bearer'. + */ + credentials: {[ key: string ]: string | (() => string | undefined)}; + + constructor(configurationParameters: ConfigurationParameters = {}) { + this.apiKeys = configurationParameters.apiKeys; + this.username = configurationParameters.username; + this.password = configurationParameters.password; + this.accessToken = configurationParameters.accessToken; + this.basePath = configurationParameters.basePath; + this.withCredentials = configurationParameters.withCredentials; + this.encoder = configurationParameters.encoder; + if (configurationParameters.encodeParam) { + this.encodeParam = configurationParameters.encodeParam; + } + else { + this.encodeParam = param => this.defaultEncodeParam(param); + } + if (configurationParameters.credentials) { + this.credentials = configurationParameters.credentials; + } + else { + this.credentials = {}; + } + } + + /** + * Select the correct content-type to use for a request. + * Uses {@link Configuration#isJsonMime} to determine the correct content-type. + * If no content type is found return the first found type if the contentTypes is not empty + * @param contentTypes - the array of content types that are available for selection + * @returns the selected content-type or undefined if no selection could be made. + */ + public selectHeaderContentType (contentTypes: string[]): string | undefined { + if (contentTypes.length === 0) { + return undefined; + } + + const type = contentTypes.find((x: string) => this.isJsonMime(x)); + if (type === undefined) { + return contentTypes[0]; + } + return type; + } + + /** + * Select the correct accept content-type to use for a request. + * Uses {@link Configuration#isJsonMime} to determine the correct accept content-type. + * If no content type is found return the first found type if the contentTypes is not empty + * @param accepts - the array of content types that are available for selection. + * @returns the selected content-type or undefined if no selection could be made. + */ + public selectHeaderAccept(accepts: string[]): string | undefined { + if (accepts.length === 0) { + return undefined; + } + + const type = accepts.find((x: string) => this.isJsonMime(x)); + if (type === undefined) { + return accepts[0]; + } + return type; + } + + /** + * Check if the given MIME is a JSON MIME. + * JSON MIME examples: + * application/json + * application/json; charset=UTF8 + * APPLICATION/JSON + * application/vnd.company+json + * @param mime - MIME (Multipurpose Internet Mail Extensions) + * @return True if the given MIME is JSON, false otherwise. + */ + public isJsonMime(mime: string): boolean { + const jsonMime: RegExp = new RegExp('^(application\/json|[^;/ \t]+\/[^;/ \t]+[+]json)[ \t]*(;.*)?$', 'i'); + return mime !== null && (jsonMime.test(mime) || mime.toLowerCase() === 'application/json-patch+json'); + } + + public lookupCredential(key: string): string | undefined { + const value = this.credentials[key]; + return typeof value === 'function' + ? value() + : value; + } + + private defaultEncodeParam(param: Param): string { + // This implementation exists as fallback for missing configuration + // and for backwards compatibility to older typescript-angular generator versions. + // It only works for the 'simple' parameter style. 
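+        // Illustrative example (not part of the generated code): for a 'simple'-style
+        // path parameter {name: "imageName", value: "my image", dataType: "string"},
+        // this returns encodeURIComponent("my image"), i.e. "my%20image".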
+ // Date-handling only works for the 'date-time' format. + // All other styles and Date-formats are probably handled incorrectly. + // + // But: if that's all you need (i.e.: the most common use-case): no need for customization! + + const value = param.dataFormat === 'date-time' && param.value instanceof Date + ? (param.value as Date).toISOString() + : param.value; + + return encodeURIComponent(String(value)); + } +} diff --git a/ui/src/app/api-gen/encoder.ts b/ui/src/app/api-gen/encoder.ts new file mode 100644 index 00000000000..138c4d5cf2c --- /dev/null +++ b/ui/src/app/api-gen/encoder.ts @@ -0,0 +1,20 @@ +import { HttpParameterCodec } from '@angular/common/http'; + +/** + * Custom HttpParameterCodec + * Workaround for https://github.com/angular/angular/issues/18261 + */ +export class CustomHttpParameterCodec implements HttpParameterCodec { + encodeKey(k: string): string { + return encodeURIComponent(k); + } + encodeValue(v: string): string { + return encodeURIComponent(v); + } + decodeKey(k: string): string { + return decodeURIComponent(k); + } + decodeValue(v: string): string { + return decodeURIComponent(v); + } +} diff --git a/ui/src/app/api-gen/git_push.sh b/ui/src/app/api-gen/git_push.sh new file mode 100644 index 00000000000..f53a75d4fab --- /dev/null +++ b/ui/src/app/api-gen/git_push.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# ref: https://help.github.com/articles/adding-an-existing-project-to-github-using-the-command-line/ +# +# Usage example: /bin/sh ./git_push.sh wing328 openapi-petstore-perl "minor update" "gitlab.com" + +git_user_id=$1 +git_repo_id=$2 +release_note=$3 +git_host=$4 + +if [ "$git_host" = "" ]; then + git_host="github.com" + echo "[INFO] No command line input provided. Set \$git_host to $git_host" +fi + +if [ "$git_user_id" = "" ]; then + git_user_id="GIT_USER_ID" + echo "[INFO] No command line input provided. Set \$git_user_id to $git_user_id" +fi + +if [ "$git_repo_id" = "" ]; then + git_repo_id="GIT_REPO_ID" + echo "[INFO] No command line input provided. Set \$git_repo_id to $git_repo_id" +fi + +if [ "$release_note" = "" ]; then + release_note="Minor update" + echo "[INFO] No command line input provided. Set \$release_note to $release_note" +fi + +# Initialize the local directory as a Git repository +git init + +# Adds the files in the local repository and stages them for commit. +git add . + +# Commits the tracked changes and prepares them to be pushed to a remote repository. +git commit -m "$release_note" + +# Sets the new remote +git_remote=$(git remote) +if [ "$git_remote" = "" ]; then # git remote not defined + + if [ "$GIT_TOKEN" = "" ]; then + echo "[INFO] \$GIT_TOKEN (environment variable) is not set. Using the git credential in your environment." 
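+ # Illustrative (assuming the defaults above): this adds https://github.com/GIT_USER_ID/GIT_REPO_ID.git as 'origin'.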
+ git remote add origin https://${git_host}/${git_user_id}/${git_repo_id}.git + else + git remote add origin https://${git_user_id}:"${GIT_TOKEN}"@${git_host}/${git_user_id}/${git_repo_id}.git + fi + +fi + +git pull origin master + +# Pushes (Forces) the changes in the local repository up to the remote repository +echo "Git pushing to https://${git_host}/${git_user_id}/${git_repo_id}.git" +git push origin master 2>&1 | grep -v 'To https' diff --git a/ui/src/app/api-gen/index.ts b/ui/src/app/api-gen/index.ts new file mode 100644 index 00000000000..104dd3d21e3 --- /dev/null +++ b/ui/src/app/api-gen/index.ts @@ -0,0 +1,6 @@ +export * from './api/api'; +export * from './model/models'; +export * from './variables'; +export * from './configuration'; +export * from './api.module'; +export * from './param'; diff --git a/ui/src/app/api-gen/model/applyCommand.ts b/ui/src/app/api-gen/model/applyCommand.ts new file mode 100644 index 00000000000..343e26e26aa --- /dev/null +++ b/ui/src/app/api-gen/model/applyCommand.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface ApplyCommand { + component: string; +} + diff --git a/ui/src/app/api-gen/model/command.ts b/ui/src/app/api-gen/model/command.ts new file mode 100644 index 00000000000..a36b64176fe --- /dev/null +++ b/ui/src/app/api-gen/model/command.ts @@ -0,0 +1,28 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ +import { ExecCommand } from './execCommand'; +import { ImageCommand } from './imageCommand'; +import { ApplyCommand } from './applyCommand'; +import { CompositeCommand } from './compositeCommand'; + + +export interface Command { + name: string; + group: string; + _default?: boolean; + type: string; + exec?: ExecCommand; + apply?: ApplyCommand; + image?: ImageCommand; + composite?: CompositeCommand; +} + diff --git a/ui/src/app/api-gen/model/componentCommandPostRequest.ts b/ui/src/app/api-gen/model/componentCommandPostRequest.ts new file mode 100644 index 00000000000..eba865f140f --- /dev/null +++ b/ui/src/app/api-gen/model/componentCommandPostRequest.ts @@ -0,0 +1,27 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface ComponentCommandPostRequest { + /** + * Name of the command that should be executed + */ + name?: ComponentCommandPostRequest.NameEnum; +} +export namespace ComponentCommandPostRequest { + export type NameEnum = 'push'; + export const NameEnum = { + Push: 'push' as NameEnum + }; +} + + diff --git a/ui/src/app/api-gen/model/componentGet200Response.ts b/ui/src/app/api-gen/model/componentGet200Response.ts new file mode 100644 index 00000000000..0223b93792e --- /dev/null +++ b/ui/src/app/api-gen/model/componentGet200Response.ts @@ -0,0 +1,20 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface ComponentGet200Response { + /** + * Description of the component. This is the same as output of \'odo describe component -o json\' + */ + component?: object; +} + diff --git a/ui/src/app/api-gen/model/compositeCommand.ts b/ui/src/app/api-gen/model/compositeCommand.ts new file mode 100644 index 00000000000..58cef0d281d --- /dev/null +++ b/ui/src/app/api-gen/model/compositeCommand.ts @@ -0,0 +1,18 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface CompositeCommand { + commands: Array; + parallel: boolean; +} + diff --git a/ui/src/app/api-gen/model/container.ts b/ui/src/app/api-gen/model/container.ts new file mode 100644 index 00000000000..790d0c05314 --- /dev/null +++ b/ui/src/app/api-gen/model/container.ts @@ -0,0 +1,24 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface Container { + name: string; + image: string; + command: Array; + args: Array; + memoryRequest: string; + memoryLimit: string; + cpuRequest: string; + cpuLimit: string; +} + diff --git a/ui/src/app/api-gen/model/devfileContent.ts b/ui/src/app/api-gen/model/devfileContent.ts new file mode 100644 index 00000000000..2ea6ed84694 --- /dev/null +++ b/ui/src/app/api-gen/model/devfileContent.ts @@ -0,0 +1,29 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ +import { Container } from './container'; +import { Command } from './command'; +import { Events } from './events'; +import { Metadata } from './metadata'; +import { Resource } from './resource'; +import { Image } from './image'; + + +export interface DevfileContent { + content: string; + commands: Array; + containers: Array; + images: Array; + resources: Array; + events: Events; + metadata: Metadata; +} + diff --git a/ui/src/app/api-gen/model/devstateApplyCommandPostRequest.ts b/ui/src/app/api-gen/model/devstateApplyCommandPostRequest.ts new file mode 100644 index 00000000000..6cb29564c1c --- /dev/null +++ b/ui/src/app/api-gen/model/devstateApplyCommandPostRequest.ts @@ -0,0 +1,21 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateApplyCommandPostRequest { + /** + * Name of the command + */ + name?: string; + component?: string; +} + diff --git a/ui/src/app/api-gen/model/devstateChartGet200Response.ts b/ui/src/app/api-gen/model/devstateChartGet200Response.ts new file mode 100644 index 00000000000..202961de10e --- /dev/null +++ b/ui/src/app/api-gen/model/devstateChartGet200Response.ts @@ -0,0 +1,20 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateChartGet200Response { + /** + * chart in mermaid format + */ + chart: string; +} + diff --git a/ui/src/app/api-gen/model/devstateCommandCommandNameMovePostRequest.ts b/ui/src/app/api-gen/model/devstateCommandCommandNameMovePostRequest.ts new file mode 100644 index 00000000000..6292adb2cab --- /dev/null +++ b/ui/src/app/api-gen/model/devstateCommandCommandNameMovePostRequest.ts @@ -0,0 +1,32 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateCommandCommandNameMovePostRequest { + /** + * Initial group of the command + */ + fromGroup?: string; + /** + * Initial index of the command in the group + */ + fromIndex?: number; + /** + * Target group of the command + */ + toGroup?: string; + /** + * Target index of the command in the group + */ + toIndex?: number; +} + diff --git a/ui/src/app/api-gen/model/devstateCommandCommandNameSetDefaultPostRequest.ts b/ui/src/app/api-gen/model/devstateCommandCommandNameSetDefaultPostRequest.ts new file mode 100644 index 00000000000..618dc4c6663 --- /dev/null +++ b/ui/src/app/api-gen/model/devstateCommandCommandNameSetDefaultPostRequest.ts @@ -0,0 +1,20 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface DevstateCommandCommandNameSetDefaultPostRequest { + /** + * group of the command + */ + group?: string; +} + diff --git a/ui/src/app/api-gen/model/devstateCompositeCommandPostRequest.ts b/ui/src/app/api-gen/model/devstateCompositeCommandPostRequest.ts new file mode 100644 index 00000000000..27a5ee47eab --- /dev/null +++ b/ui/src/app/api-gen/model/devstateCompositeCommandPostRequest.ts @@ -0,0 +1,22 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateCompositeCommandPostRequest { + /** + * Name of the command + */ + name?: string; + parallel?: boolean; + commands?: Array; +} + diff --git a/ui/src/app/api-gen/model/devstateContainerPostRequest.ts b/ui/src/app/api-gen/model/devstateContainerPostRequest.ts new file mode 100644 index 00000000000..5a8636ed3f6 --- /dev/null +++ b/ui/src/app/api-gen/model/devstateContainerPostRequest.ts @@ -0,0 +1,48 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateContainerPostRequest { + /** + * Name of the container + */ + name?: string; + /** + * Container image + */ + image?: string; + /** + * Entrypoint of the container + */ + command?: Array; + /** + * Args passed to the Container entrypoint + */ + args?: Array; + /** + * Requested memory for the deployed container + */ + memReq?: string; + /** + * Memory limit for the deployed container + */ + memLimit?: string; + /** + * Requested CPU for the deployed container + */ + cpuReq?: string; + /** + * CPU limit for the deployed container + */ + cpuLimit?: string; +} + diff --git a/ui/src/app/api-gen/model/devstateDevfilePutRequest.ts b/ui/src/app/api-gen/model/devstateDevfilePutRequest.ts new file mode 100644 index 00000000000..ac38745e35e --- /dev/null +++ b/ui/src/app/api-gen/model/devstateDevfilePutRequest.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateDevfilePutRequest { + content: string; +} + diff --git a/ui/src/app/api-gen/model/devstateEventsPutRequest.ts b/ui/src/app/api-gen/model/devstateEventsPutRequest.ts new file mode 100644 index 00000000000..72545493aea --- /dev/null +++ b/ui/src/app/api-gen/model/devstateEventsPutRequest.ts @@ -0,0 +1,28 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface DevstateEventsPutRequest { + eventName?: DevstateEventsPutRequest.EventNameEnum; + commands?: Array; +} +export namespace DevstateEventsPutRequest { + export type EventNameEnum = 'postStart' | 'postStop' | 'preStart' | 'preStop'; + export const EventNameEnum = { + PostStart: 'postStart' as EventNameEnum, + PostStop: 'postStop' as EventNameEnum, + PreStart: 'preStart' as EventNameEnum, + PreStop: 'preStop' as EventNameEnum + }; +} + + diff --git a/ui/src/app/api-gen/model/devstateExecCommandPostRequest.ts b/ui/src/app/api-gen/model/devstateExecCommandPostRequest.ts new file mode 100644 index 00000000000..1105f193890 --- /dev/null +++ b/ui/src/app/api-gen/model/devstateExecCommandPostRequest.ts @@ -0,0 +1,24 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateExecCommandPostRequest { + /** + * Name of the command + */ + name?: string; + component?: string; + commandLine?: string; + workingDir?: string; + hotReloadCapable?: boolean; +} + diff --git a/ui/src/app/api-gen/model/devstateImagePostRequest.ts b/ui/src/app/api-gen/model/devstateImagePostRequest.ts new file mode 100644 index 00000000000..157b01564ca --- /dev/null +++ b/ui/src/app/api-gen/model/devstateImagePostRequest.ts @@ -0,0 +1,25 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateImagePostRequest { + /** + * Name of the image + */ + name?: string; + imageName?: string; + args?: Array; + buildContext?: string; + rootRequired?: boolean; + uri?: string; +} + diff --git a/ui/src/app/api-gen/model/devstateMetadataPutRequest.ts b/ui/src/app/api-gen/model/devstateMetadataPutRequest.ts new file mode 100644 index 00000000000..6f4f88f64a3 --- /dev/null +++ b/ui/src/app/api-gen/model/devstateMetadataPutRequest.ts @@ -0,0 +1,29 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateMetadataPutRequest { + name?: string; + version?: string; + displayName?: string; + description?: string; + tags?: string; + architectures?: string; + icon?: string; + globalMemoryLimit?: string; + projectType?: string; + language?: string; + website?: string; + provider?: string; + supportUrl?: string; +} + diff --git a/ui/src/app/api-gen/model/devstateQuantityValidPostRequest.ts b/ui/src/app/api-gen/model/devstateQuantityValidPostRequest.ts new file mode 100644 index 00000000000..7bbcf32bfff --- /dev/null +++ b/ui/src/app/api-gen/model/devstateQuantityValidPostRequest.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface DevstateQuantityValidPostRequest { + quantity: string; +} + diff --git a/ui/src/app/api-gen/model/devstateResourcePostRequest.ts b/ui/src/app/api-gen/model/devstateResourcePostRequest.ts new file mode 100644 index 00000000000..d7dc49d4d27 --- /dev/null +++ b/ui/src/app/api-gen/model/devstateResourcePostRequest.ts @@ -0,0 +1,22 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface DevstateResourcePostRequest { + /** + * Name of the resource + */ + name?: string; + inlined?: string; + uri?: string; +} + diff --git a/ui/src/app/api-gen/model/events.ts b/ui/src/app/api-gen/model/events.ts new file mode 100644 index 00000000000..aaf01f5a276 --- /dev/null +++ b/ui/src/app/api-gen/model/events.ts @@ -0,0 +1,20 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface Events { + preStart?: Array; + postStart?: Array; + preStop?: Array; + postStop?: Array; +} + diff --git a/ui/src/app/api-gen/model/execCommand.ts b/ui/src/app/api-gen/model/execCommand.ts new file mode 100644 index 00000000000..f395b1dc937 --- /dev/null +++ b/ui/src/app/api-gen/model/execCommand.ts @@ -0,0 +1,20 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface ExecCommand { + component: string; + commandLine: string; + workingDir: string; + hotReloadCapable: boolean; +} + diff --git a/ui/src/app/api-gen/model/generalError.ts b/ui/src/app/api-gen/model/generalError.ts new file mode 100644 index 00000000000..32d2fdc9d0b --- /dev/null +++ b/ui/src/app/api-gen/model/generalError.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface GeneralError { + message?: string; +} + diff --git a/ui/src/app/api-gen/model/generalSuccess.ts b/ui/src/app/api-gen/model/generalSuccess.ts new file mode 100644 index 00000000000..565ad445e34 --- /dev/null +++ b/ui/src/app/api-gen/model/generalSuccess.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface GeneralSuccess { + message?: string; +} + diff --git a/ui/src/app/api-gen/model/image.ts b/ui/src/app/api-gen/model/image.ts new file mode 100644 index 00000000000..0c67494f007 --- /dev/null +++ b/ui/src/app/api-gen/model/image.ts @@ -0,0 +1,22 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface Image { + name: string; + imageName: string; + args: Array; + buildContext: string; + rootRequired: boolean; + uri: string; +} + diff --git a/ui/src/app/api-gen/model/imageCommand.ts b/ui/src/app/api-gen/model/imageCommand.ts new file mode 100644 index 00000000000..394873d628e --- /dev/null +++ b/ui/src/app/api-gen/model/imageCommand.ts @@ -0,0 +1,17 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface ImageCommand { + component: string; +} + diff --git a/ui/src/app/api-gen/model/instanceGet200Response.ts b/ui/src/app/api-gen/model/instanceGet200Response.ts new file mode 100644 index 00000000000..8dc0e26824b --- /dev/null +++ b/ui/src/app/api-gen/model/instanceGet200Response.ts @@ -0,0 +1,24 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface InstanceGet200Response { + /** + * Directory on which this \'odo dev\' instance is running + */ + componentDirectory?: string; + /** + * PID of the this \'odo dev\' instance. + */ + pid?: number; +} + diff --git a/ui/src/app/api-gen/model/metadata.ts b/ui/src/app/api-gen/model/metadata.ts new file mode 100644 index 00000000000..f14bddd8727 --- /dev/null +++ b/ui/src/app/api-gen/model/metadata.ts @@ -0,0 +1,29 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. 
+ */ + + +export interface Metadata { + name?: string; + version?: string; + displayName?: string; + description?: string; + tags?: string; + architectures?: string; + icon?: string; + globalMemoryLimit?: string; + projectType?: string; + language?: string; + website?: string; + provider?: string; + supportUrl?: string; +} + diff --git a/ui/src/app/api-gen/model/models.ts b/ui/src/app/api-gen/model/models.ts new file mode 100644 index 00000000000..3134e20e218 --- /dev/null +++ b/ui/src/app/api-gen/model/models.ts @@ -0,0 +1,28 @@ +export * from './applyCommand'; +export * from './command'; +export * from './componentCommandPostRequest'; +export * from './componentGet200Response'; +export * from './compositeCommand'; +export * from './container'; +export * from './devfileContent'; +export * from './devstateApplyCommandPostRequest'; +export * from './devstateChartGet200Response'; +export * from './devstateCommandCommandNameMovePostRequest'; +export * from './devstateCommandCommandNameSetDefaultPostRequest'; +export * from './devstateCompositeCommandPostRequest'; +export * from './devstateContainerPostRequest'; +export * from './devstateDevfilePutRequest'; +export * from './devstateEventsPutRequest'; +export * from './devstateExecCommandPostRequest'; +export * from './devstateImagePostRequest'; +export * from './devstateQuantityValidPostRequest'; +export * from './devstateResourcePostRequest'; +export * from './events'; +export * from './execCommand'; +export * from './generalError'; +export * from './generalSuccess'; +export * from './image'; +export * from './imageCommand'; +export * from './instanceGet200Response'; +export * from './metadata'; +export * from './resource'; diff --git a/ui/src/app/api-gen/model/resource.ts b/ui/src/app/api-gen/model/resource.ts new file mode 100644 index 00000000000..90dd3cd5927 --- /dev/null +++ b/ui/src/app/api-gen/model/resource.ts @@ -0,0 +1,19 @@ +/** + * odo dev + * API interface for \'odo dev\' + * + * The version of the OpenAPI document: 0.1 + * + * + * NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * https://openapi-generator.tech + * Do not edit the class manually. + */ + + +export interface Resource { + name: string; + inlined?: string; + uri?: string; +} + diff --git a/ui/src/app/api-gen/param.ts b/ui/src/app/api-gen/param.ts new file mode 100644 index 00000000000..78a2d20a643 --- /dev/null +++ b/ui/src/app/api-gen/param.ts @@ -0,0 +1,69 @@ +/** + * Standard parameter styles defined by OpenAPI spec + */ +export type StandardParamStyle = + | 'matrix' + | 'label' + | 'form' + | 'simple' + | 'spaceDelimited' + | 'pipeDelimited' + | 'deepObject' + ; + +/** + * The OpenAPI standard {@link StandardParamStyle}s may be extended by custom styles by the user. + */ +export type ParamStyle = StandardParamStyle | string; + +/** + * Standard parameter locations defined by OpenAPI spec + */ +export type ParamLocation = 'query' | 'header' | 'path' | 'cookie'; + +/** + * Standard types as defined in OpenAPI Specification: Data Types + */ +export type StandardDataType = + | "integer" + | "number" + | "boolean" + | "string" + | "object" + | "array" + ; + +/** + * Standard {@link DataType}s plus your own types/classes. 
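+ * For example (illustrative): 'string' is a standard DataType, while a model name such as 'DevfileContent' is a custom one.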
+ */ +export type DataType = StandardDataType | string; + +/** + * Standard formats as defined in OpenAPI Specification: Data Types + */ +export type StandardDataFormat = + | "int32" + | "int64" + | "float" + | "double" + | "byte" + | "binary" + | "date" + | "date-time" + | "password" + ; + +export type DataFormat = StandardDataFormat | string; + +/** + * The parameter to encode. + */ +export interface Param { + name: string; + value: unknown; + in: ParamLocation; + style: ParamStyle, + explode: boolean; + dataType: DataType; + dataFormat: DataFormat | undefined; +} diff --git a/ui/src/app/api-gen/variables.ts b/ui/src/app/api-gen/variables.ts new file mode 100644 index 00000000000..6fe58549f39 --- /dev/null +++ b/ui/src/app/api-gen/variables.ts @@ -0,0 +1,9 @@ +import { InjectionToken } from '@angular/core'; + +export const BASE_PATH = new InjectionToken('basePath'); +export const COLLECTION_FORMATS = { + 'csv': ',', + 'tsv': ' ', + 'ssv': ' ', + 'pipes': '|' +} diff --git a/ui/src/app/app.component.ts b/ui/src/app/app.component.ts index 8124940a243..18801fa8120 100644 --- a/ui/src/app/app.component.ts +++ b/ui/src/app/app.component.ts @@ -1,5 +1,5 @@ import { Component, OnInit } from '@angular/core'; -import { WasmGoService } from './services/wasm-go.service'; +import { DevstateService } from './services/devstate.service'; import { DomSanitizer } from '@angular/platform-browser'; import { MermaidService } from './services/mermaid.service'; import { StateService } from './services/state.service'; @@ -19,7 +19,7 @@ export class AppComponent implements OnInit { constructor( protected sanitizer: DomSanitizer, private matIconRegistry: MatIconRegistry, - private wasmGo: WasmGoService, + private wasmGo: DevstateService, private mermaid: MermaidService, private state: StateService, ) { @@ -35,10 +35,12 @@ export class AppComponent implements OnInit { loading.style.visibility = "hidden"; } - const devfile = this.state.getDevfile(); - if (devfile != null) { - this.onButtonClick(devfile); - } + const devfile = this.wasmGo.getDevfileContent(); + devfile.subscribe({ + next: (devfile) => { + this.onButtonClick(devfile.content); + } + }); this.state.state.subscribe(async newContent => { if (newContent == null) { @@ -47,28 +49,39 @@ export class AppComponent implements OnInit { this.devfileYaml = newContent.content; - try { - const result = this.wasmGo.getFlowChart(); - const svg = await this.mermaid.getMermaidAsSVG(result); - this.mermaidContent = svg; - } catch {} + const result = this.wasmGo.getFlowChart(); + result.subscribe({ + next: async (res) => { + const svg = await this.mermaid.getMermaidAsSVG(res.chart); + this.mermaidContent = svg; + }, + error: (error) => { + console.log(error); + } + }); }); } onButtonClick(content: string){ const result = this.wasmGo.setDevfileContent(content); - if (result.err != '') { - this.errorMessage = result.err; - } else { - this.errorMessage = ''; - this.state.changeDevfileYaml(result.value); - } + result.subscribe({ + next: (value) => { + this.errorMessage = ''; + this.state.changeDevfileYaml(value); + }, + error: (error) => { + this.errorMessage = error.error.message; + } + }); } clear() { if (confirm('You will delete the content of the Devfile. 
Continue?')) { - this.state.resetDevfile(); - window.location.reload(); + this.wasmGo.clearDevfileContent().subscribe({ + next: (value) => { + this.onButtonClick(value.content); + } + }); } } } diff --git a/ui/src/app/app.module.ts b/ui/src/app/app.module.ts index 92df716fb7e..97007cde80e 100644 --- a/ui/src/app/app.module.ts +++ b/ui/src/app/app.module.ts @@ -1,4 +1,4 @@ -import { APP_INITIALIZER, NgModule } from '@angular/core'; +import { NgModule } from '@angular/core'; import { BrowserModule } from '@angular/platform-browser'; import { BrowserAnimationsModule } from '@angular/platform-browser/animations'; import { ReactiveFormsModule } from '@angular/forms'; @@ -42,20 +42,6 @@ import { MultiCommandComponent } from './controls/multi-command/multi-command.co import { EventsComponent } from './tabs/events/events.component'; import { ChipsEventsComponent } from './controls/chips-events/chips-events.component'; -declare const Go: any; - -function loadWasmModule() { - return () => { - return new Promise((resolve) => { - const go = new Go(); - WebAssembly.instantiateStreaming(fetch("./assets/devfile.6ccdbea7789422ba86e8b08707f15f66.wasm"), go.importObject).then((result) => { - go.run(result.instance); - resolve(); - }); - }); - }; -} - @NgModule({ declarations: [ AppComponent, @@ -102,13 +88,7 @@ function loadWasmModule() { MatToolbarModule, MatTooltipModule ], - providers: [ - { - provide: APP_INITIALIZER, - useFactory: loadWasmModule, - multi: true, - }, - ], + providers: [], bootstrap: [AppComponent] }) export class AppModule { } diff --git a/ui/src/app/forms/command-apply/command-apply.component.ts b/ui/src/app/forms/command-apply/command-apply.component.ts index cdce5a9ec4f..ed8f8e278d1 100644 --- a/ui/src/app/forms/command-apply/command-apply.component.ts +++ b/ui/src/app/forms/command-apply/command-apply.component.ts @@ -1,8 +1,9 @@ import { Component, EventEmitter, Output } from '@angular/core'; import { FormControl, FormGroup, Validators } from '@angular/forms'; import { StateService } from 'src/app/services/state.service'; -import { ClusterResource, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; import { PATTERN_COMMAND_ID } from '../patterns'; +import { Resource } from 'src/app/api-gen'; @Component({ selector: 'app-command-apply', @@ -15,10 +16,10 @@ export class CommandApplyComponent { form: FormGroup; resourceList: string[] = []; showNewResource: boolean = false; - resourceToCreate: ClusterResource | null = null; + resourceToCreate: Resource | null = null; constructor( - private wasm: WasmGoService, + private devstate: DevstateService, private state: StateService, ) { this.form = new FormGroup({ @@ -36,20 +37,32 @@ export class CommandApplyComponent { } create() { - if (this.resourceToCreate != null && - this.resourceToCreate?.name == this.form.controls["component"].value) { - const result = this.wasm.addResource(this.resourceToCreate); - if (result.err != '') { - alert(result.err); - return; - } + const subcreate = () => { + const result = this.devstate.addApplyCommand(this.form.value["name"], this.form.value); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } - const result = this.wasm.addApplyCommand(this.form.value["name"], this.form.value); - if (result.err != '') { - alert(result.err); + if (this.resourceToCreate != null && + this.resourceToCreate?.name == this.form.controls["component"].value) { 
+ const result = this.devstate.addResource(this.resourceToCreate); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + subcreate(); + }, + error: (error) => { + alert(error.error.message); + } + }); } else { - this.state.changeDevfileYaml(result.value); + subcreate(); } } @@ -61,7 +74,7 @@ export class CommandApplyComponent { this.showNewResource = v; } - onNewResourceCreated(resource: ClusterResource) { + onNewResourceCreated(resource: Resource) { this.resourceList.push(resource.name); this.form.controls["component"].setValue(resource.name); this.showNewResource = false; diff --git a/ui/src/app/forms/command-composite/command-composite.component.ts b/ui/src/app/forms/command-composite/command-composite.component.ts index e94ccc8876e..b6f9b3490f5 100644 --- a/ui/src/app/forms/command-composite/command-composite.component.ts +++ b/ui/src/app/forms/command-composite/command-composite.component.ts @@ -1,7 +1,7 @@ import { Component, EventEmitter, Output } from '@angular/core'; -import { FormArray, FormControl, FormGroup, Validators } from '@angular/forms'; +import { FormControl, FormGroup, Validators } from '@angular/forms'; import { StateService } from 'src/app/services/state.service'; -import { WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; import { PATTERN_COMMAND_ID } from '../patterns'; @Component({ @@ -16,7 +16,7 @@ export class CommandCompositeComponent { commandList: string[] = []; constructor( - private wasm: WasmGoService, + private devstate: DevstateService, private state: StateService, ) { this.form = new FormGroup({ @@ -36,12 +36,15 @@ export class CommandCompositeComponent { } create() { - const result = this.wasm.addCompositeCommand(this.form.value["name"], this.form.value); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.addCompositeCommand(this.form.value["name"], this.form.value); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } cancel() { diff --git a/ui/src/app/forms/command-exec/command-exec.component.ts b/ui/src/app/forms/command-exec/command-exec.component.ts index 3f684934d52..b908f85a1d5 100644 --- a/ui/src/app/forms/command-exec/command-exec.component.ts +++ b/ui/src/app/forms/command-exec/command-exec.component.ts @@ -1,8 +1,9 @@ import { Component, EventEmitter, Output } from '@angular/core'; import { FormControl, FormGroup, Validators } from '@angular/forms'; import { StateService } from 'src/app/services/state.service'; -import { Container, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; import { PATTERN_COMMAND_ID } from '../patterns'; +import { Container } from 'src/app/api-gen'; @Component({ selector: 'app-command-exec', @@ -18,7 +19,7 @@ export class CommandExecComponent { containerToCreate: Container | null = null; constructor( - private wasm: WasmGoService, + private devstate: DevstateService, private state: StateService, ) { this.form = new FormGroup({ @@ -38,20 +39,34 @@ export class CommandExecComponent { }); } + create() { + + const subcreate = () => { + const result = this.devstate.addExecCommand(this.form.value["name"], this.form.value); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } 
+ }); + } + if (this.containerToCreate != null && this.containerToCreate?.name == this.form.controls["component"].value) { - const result = this.wasm.addContainer(this.containerToCreate); - if (result.err != '') { - alert(result.err); - return; - } - } - const result = this.wasm.addExecCommand(this.form.value["name"], this.form.value); - if (result.err != '') { - alert(result.err); + const res = this.devstate.addContainer(this.containerToCreate); + res.subscribe({ + next: () => { + subcreate(); + }, + error: error => { + alert(error.error.message); + } + }); } else { - this.state.changeDevfileYaml(result.value); + subcreate(); } } diff --git a/ui/src/app/forms/command-image/command-image.component.ts b/ui/src/app/forms/command-image/command-image.component.ts index 47d8975b63c..5ae9989f5ee 100644 --- a/ui/src/app/forms/command-image/command-image.component.ts +++ b/ui/src/app/forms/command-image/command-image.component.ts @@ -1,8 +1,9 @@ import { Component, EventEmitter, Output } from '@angular/core'; import { FormControl, FormGroup, Validators } from '@angular/forms'; import { StateService } from 'src/app/services/state.service'; -import { Image, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; import { PATTERN_COMMAND_ID } from '../patterns'; +import { Image } from 'src/app/api-gen'; @Component({ selector: 'app-command-image', @@ -18,7 +19,7 @@ export class CommandImageComponent { imageToCreate: Image | null = null; constructor( - private wasm: WasmGoService, + private devstate: DevstateService, private state: StateService, ) { this.form = new FormGroup({ @@ -36,20 +37,32 @@ export class CommandImageComponent { } create() { - if (this.imageToCreate != null && - this.imageToCreate?.name == this.form.controls["component"].value) { - const result = this.wasm.addImage(this.imageToCreate); - if (result.err != '') { - alert(result.err); - return; - } + + const subcreate = () => { + const result = this.devstate.addApplyCommand(this.form.value["name"], this.form.value); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } - const result = this.wasm.addApplyCommand(this.form.value["name"], this.form.value); - if (result.err != '') { - alert(result.err); + if (this.imageToCreate != null && + this.imageToCreate?.name == this.form.controls["component"].value) { + const result = this.devstate.addImage(this.imageToCreate); + result.subscribe({ + next: () => { + subcreate(); + }, + error: error => { + alert(error.error.message); + } + }); } else { - this.state.changeDevfileYaml(result.value); + subcreate(); } } diff --git a/ui/src/app/forms/container/container.component.ts b/ui/src/app/forms/container/container.component.ts index a73835d279d..b1075b1f954 100644 --- a/ui/src/app/forms/container/container.component.ts +++ b/ui/src/app/forms/container/container.component.ts @@ -1,7 +1,9 @@ import { Component, EventEmitter, Input, Output } from '@angular/core'; -import { AbstractControl, FormControl, FormGroup, ValidationErrors, ValidatorFn, Validators } from '@angular/forms'; +import { AbstractControl, AsyncValidatorFn, FormControl, FormGroup, ValidationErrors, ValidatorFn, Validators } from '@angular/forms'; import { PATTERN_COMPONENT_ID } from '../patterns'; -import { Container, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Observable, of, 
map, catchError } from 'rxjs';
+import { Container } from 'src/app/api-gen';
 
 @Component({
   selector: 'app-container',
@@ -19,17 +21,17 @@ export class ContainerComponent {
   quantityErrMsgCPU = 'Numeric value, with optional unit m, k, M, G, T, P, E';
 
   constructor(
-    private wasm: WasmGoService,
+    private devstate: DevstateService,
   ) {
     this.form = new FormGroup({
       name: new FormControl("", [Validators.required, Validators.pattern(PATTERN_COMPONENT_ID)]),
       image: new FormControl("", [Validators.required]),
       command: new FormControl([]),
       args: new FormControl([]),
-      memoryRequest: new FormControl("", [this.isQuantity()]),
-      memoryLimit: new FormControl("", [this.isQuantity()]),
-      cpuRequest: new FormControl("", [this.isQuantity()]),
-      cpuLimit: new FormControl("", [this.isQuantity()]),
+      memoryRequest: new FormControl("", null, [this.isQuantity()]),
+      memoryLimit: new FormControl("", null, [this.isQuantity()]),
+      cpuRequest: new FormControl("", null, [this.isQuantity()]),
+      cpuLimit: new FormControl("", null, [this.isQuantity()]),
     })
   }
 
@@ -41,19 +43,17 @@ export class ContainerComponent {
     this.canceled.emit();
   }
 
-  isQuantity(): ValidatorFn {
-    return (control: AbstractControl): ValidationErrors | null => {
+  isQuantity(): AsyncValidatorFn {
+    return (control: AbstractControl): Observable<ValidationErrors | null> => {
       const val = control.value;
       if (val == '') {
-        return null;
+        return of(null);
       }
-      const valid = this.wasm.isQuantityValid(val);
-      if (!valid) {
-        return {
-          "isQuantity": false,
-        }
-      }
-      return null;
+      const valid = this.devstate.isQuantityValid(val);
+      return valid.pipe(
+        map(() => null),
+        catchError(() => of({"isQuantity": false}))
+      );
     };
   }
 }
diff --git a/ui/src/app/forms/image/image.component.ts b/ui/src/app/forms/image/image.component.ts
index fe0991c07e2..f15d6118bac 100644
--- a/ui/src/app/forms/image/image.component.ts
+++ b/ui/src/app/forms/image/image.component.ts
@@ -1,7 +1,7 @@
 import { Component, EventEmitter, Input, Output } from '@angular/core';
 import { FormControl, FormGroup, Validators } from '@angular/forms';
-import { Image } from 'src/app/services/wasm-go.service';
 import { PATTERN_COMPONENT_ID } from '../patterns';
+import { Image } from 'src/app/api-gen';
 
 @Component({
   selector: 'app-image',
diff --git a/ui/src/app/forms/metadata/metadata.component.ts b/ui/src/app/forms/metadata/metadata.component.ts
index b40b72e1289..3d5017022ff 100644
--- a/ui/src/app/forms/metadata/metadata.component.ts
+++ b/ui/src/app/forms/metadata/metadata.component.ts
@@ -1,7 +1,7 @@
 import { Component, OnInit } from '@angular/core';
 import { FormControl, FormGroup, Validators } from '@angular/forms';
 import { StateService } from 'src/app/services/state.service';
-import { WasmGoService } from 'src/app/services/wasm-go.service';
+import { DevstateService } from 'src/app/services/devstate.service';
 
 const semverPattern = `^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$`;
 
@@ -15,7 +15,7 @@ export class MetadataComponent implements OnInit {
   form: FormGroup;
 
   constructor(
-    private wasm: WasmGoService,
+    private devstate: DevstateService,
     private state: StateService,
   ) {
     this.form = new FormGroup({
@@ -41,16 +41,19 @@ export class MetadataComponent implements OnInit {
       if (metadata == null) {
        return
       }
-      this.form.setValue(metadata);
+      this.form.patchValue(metadata);
     });
   }
 
   onSave() {
-    const result = this.wasm.setMetadata(this.form.value);
-    if (result.err != '') {
-      alert(result.err);
-    } else {
-      this.state.changeDevfileYaml(result.value);
-    }
+    const result = this.devstate.setMetadata(this.form.value);
+    result.subscribe({
+      next: (value) => {
+        this.state.changeDevfileYaml(value);
+      },
+      error: (error) => {
+        alert(error.error.message);
+      }
+    });
   }
 }
diff --git a/ui/src/app/forms/resource/resource.component.ts b/ui/src/app/forms/resource/resource.component.ts
index badc50a7149..280e3d45554 100644
--- a/ui/src/app/forms/resource/resource.component.ts
+++ b/ui/src/app/forms/resource/resource.component.ts
@@ -1,7 +1,7 @@
 import { Component, EventEmitter, Input, Output } from '@angular/core';
 import { FormControl, FormGroup, Validators } from '@angular/forms';
-import { ClusterResource } from 'src/app/services/wasm-go.service';
 import { PATTERN_COMPONENT_ID } from '../patterns';
+import { Resource } from 'src/app/api-gen';
 
 @Component({
   selector: 'app-resource',
@@ -11,7 +11,7 @@ import { PATTERN_COMPONENT_ID } from '../patterns';
 export class ResourceComponent {
   @Input() cancelable: boolean = false;
   @Output() canceled = new EventEmitter();
-  @Output() created = new EventEmitter<ClusterResource>();
+  @Output() created = new EventEmitter<Resource>();
 
   form: FormGroup;
   uriOrInlined: string = 'uri';
diff --git a/ui/src/app/lists/commands-list/commands-list.component.html b/ui/src/app/lists/commands-list/commands-list.component.html
index 0fa8ab2e126..8cd007be13a 100644
--- a/ui/src/app/lists/commands-list/commands-list.component.html
+++ b/ui/src/app/lists/commands-list/commands-list.component.html
@@ -9,7 +9,7 @@
         Default {{kind}} command
diff --git a/ui/src/app/lists/commands-list/commands-list.component.ts b/ui/src/app/lists/commands-list/commands-list.component.ts
index f8e72d7b29b..bc24cae9fa8 100644
--- a/ui/src/app/lists/commands-list/commands-list.component.ts
+++ b/ui/src/app/lists/commands-list/commands-list.component.ts
@@ -1,8 +1,8 @@
-import { Component, EventEmitter, Input } from '@angular/core';
+import { Component, Input } from '@angular/core';
 import { MatCheckboxChange } from '@angular/material/checkbox';
-import { MatSnackBar } from '@angular/material/snack-bar';
 import { StateService } from 'src/app/services/state.service';
-import { Command, WasmGoService } from 'src/app/services/wasm-go.service';
+import { DevstateService } from 'src/app/services/devstate.service';
+import { Command } from 'src/app/api-gen';
 
 @Component({
   selector: 'app-commands-list',
@@ -15,7 +15,7 @@ export class CommandsListComponent {
   @Input() dragDisabled: boolean = true;
 
   constructor(
-    private wasm: WasmGoService,
+    private devstate: DevstateService,
     private state: StateService,
   ) {}
 
@@ -28,21 +28,27 @@ export class CommandsListComponent {
   }
 
   setDefault(command: string, group: string) {
-    const result = this.wasm.setDefaultCommand(command, group);
-    if (result.err != '') {
-      alert(result.err);
-    } else {
-      this.state.changeDevfileYaml(result.value);
-    }
+    const result = this.devstate.setDefaultCommand(command, group);
+    result.subscribe({
+      next: (value) => {
+        this.state.changeDevfileYaml(value);
+      },
+      error: (error) => {
+        alert(error.error.message);
+      }
+    });
   }
 
   unsetDefault(command: string) {
-    const result = this.wasm.unsetDefaultCommand(command);
-    if (result.err != '') {
-      alert(result.err);
-    } else {
-      this.state.changeDevfileYaml(result.value);
-    }
+    const result = this.devstate.unsetDefaultCommand(command);
+    result.subscribe({
+      next: (value) => {
+        this.state.changeDevfileYaml(value);
+      },
+      error: (error) => {
+        alert(error.error.message);
+      }
+    });
   }
 
   getCommandsByKind(commands: Command[] | undefined, kind: string ): Command[] | undefined {
@@ -51,12 +57,15 @@ export class CommandsListComponent {
   delete(command: string) {
     if(confirm('You will delete the command "'+command+'". Continue?')) {
-      const result = this.wasm.deleteCommand(command);
-      if (result.err != '') {
-        alert(result.err);
-      } else {
-        this.state.changeDevfileYaml(result.value);
-      }
+      const result = this.devstate.deleteCommand(command);
+      result.subscribe({
+        next: (value) => {
+          this.state.changeDevfileYaml(value);
+        },
+        error: (error) => {
+          alert(error.error.message);
+        }
+      });
     }
   }
 }
diff --git a/ui/src/app/services/devstate.service.spec.ts b/ui/src/app/services/devstate.service.spec.ts
new file mode 100644
index 00000000000..39ed12292aa
--- /dev/null
+++ b/ui/src/app/services/devstate.service.spec.ts
@@ -0,0 +1,16 @@
+import { TestBed } from '@angular/core/testing';
+
+import { DevstateService } from './devstate.service';
+
+describe('DevstateService', () => {
+  let service: DevstateService;
+
+  beforeEach(() => {
+    TestBed.configureTestingModule({});
+    service = TestBed.inject(DevstateService);
+  });
+
+  it('should be created', () => {
+    expect(service).toBeTruthy();
+  });
+});
diff --git a/ui/src/app/services/devstate.service.ts b/ui/src/app/services/devstate.service.ts
new file mode 100644
index 00000000000..ca3993e6a4b
--- /dev/null
+++ b/ui/src/app/services/devstate.service.ts
@@ -0,0 +1,160 @@
+import { HttpClient } from '@angular/common/http';
+import { Injectable } from '@angular/core';
+import { Observable } from 'rxjs';
+import { ApplyCommand, CompositeCommand, Container, DevfileContent, DevstateChartGet200Response, ExecCommand, Image, Metadata, Resource } from '../api-gen';
+
+@Injectable({
+  providedIn: 'root'
+})
+export class DevstateService {
+
+  private base = "/api/v1/devstate";
+
+  constructor(private http: HttpClient) { }
+
+  addContainer(container: Container): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/container", {
+      name: container.name,
+      image: container.image,
+      command: container.command,
+      args: container.args,
+      memReq: container.memoryRequest,
+      memLimit: container.memoryLimit,
+      cpuReq: container.cpuRequest,
+      cpuLimit: container.cpuLimit,
+    });
+  }
+
+  addImage(image: Image): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/image", {
+      name: image.name,
+      imageName: image.imageName,
+      args: image.args,
+      buildContext: image.buildContext,
+      rootRequired: image.rootRequired,
+      uri: image.uri
+    });
+  }
+
+  addResource(resource: Resource): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/resource", {
+      name: resource.name,
+      inlined: resource.inlined,
+      uri: resource.uri,
+    });
+  }
+
+  addExecCommand(name: string, cmd: ExecCommand): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/execCommand", {
+      name: name,
+      component: cmd.component,
+      commandLine: cmd.commandLine,
+      workingDir: cmd.workingDir,
+      hotReloadCapable: cmd.hotReloadCapable,
+    });
+  }
+
+  addApplyCommand(name: string, cmd: ApplyCommand): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/applyCommand", {
+      name: name,
+      component: cmd.component,
+    });
+  }
+
+  addCompositeCommand(name: string, cmd: CompositeCommand): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/compositeCommand", {
+      name: name,
+      parallel: cmd.parallel,
+      commands: cmd.commands,
+    });
+  }
+
+  // getFlowChart calls the devstate API to get the lifecycle of the Devfile in mermaid chart format
+  getFlowChart(): Observable<DevstateChartGet200Response> {
+    return this.http.get<DevstateChartGet200Response>(this.base+"/chart");
+  }
+
+  // setDevfileContent calls the devstate API to reset the content of the Devfile
+  setDevfileContent(devfile: string): Observable<DevfileContent> {
+    return this.http.put<DevfileContent>(this.base+"/devfile", {
+      content: devfile
+    });
+  }
+
+  // getDevfileContent gets the content of the Devfile
+  getDevfileContent(): Observable<DevfileContent> {
+    return this.http.get<DevfileContent>(this.base+"/devfile");
+  }
+
+  // clearDevfileContent clears the content of the Devfile
+  clearDevfileContent(): Observable<DevfileContent> {
+    return this.http.delete<DevfileContent>(this.base+"/devfile");
+  }
+
+  setMetadata(metadata: Metadata): Observable<DevfileContent> {
+    return this.http.put<DevfileContent>(this.base+"/metadata", {
+      name: metadata.name,
+      version: metadata.version,
+      displayName: metadata.displayName,
+      description: metadata.description,
+      tags: metadata.tags,
+      architectures: metadata.architectures,
+      icon: metadata.icon,
+      globalMemoryLimit: metadata.globalMemoryLimit,
+      projectType: metadata.projectType,
+      language: metadata.language,
+      website: metadata.website,
+      provider: metadata.provider,
+      supportUrl: metadata.supportUrl,
+    });
+  }
+
+  moveCommand(previousKind: string, newKind: string, previousIndex: number, newIndex: number): Observable<DevfileContent> {
+    // TODO set correct command Name
+    return this.http.post<DevfileContent>(this.base+"/command/0/move", {
+      fromGroup: previousKind,
+      fromIndex: previousIndex,
+      toGroup: newKind,
+      toIndex: newIndex
+    });
+  }
+
+  setDefaultCommand(command: string, group: string): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/command/"+command+"/setDefault", {
+      group: group
+    });
+  }
+
+  unsetDefaultCommand(command: string): Observable<DevfileContent> {
+    return this.http.post<DevfileContent>(this.base+"/command/"+command+"/unsetDefault", {});
+  }
+
+  deleteCommand(command: string): Observable<DevfileContent> {
+    return this.http.delete<DevfileContent>(this.base+"/command/"+command);
+  }
+
+  deleteContainer(container: string): Observable<DevfileContent> {
+    return this.http.delete<DevfileContent>(this.base+"/container/"+container);
+  }
+
+  deleteImage(image: string): Observable<DevfileContent> {
+    return this.http.delete<DevfileContent>(this.base+"/image/"+image);
+  }
+
+  deleteResource(resource: string): Observable<DevfileContent> {
+    return this.http.delete<DevfileContent>(this.base+"/resource/"+resource);
+  }
+
+  updateEvents(event: "preStart"|"postStart"|"preStop"|"postStop", commands: string[]): Observable<DevfileContent> {
+    return this.http.put<DevfileContent>(this.base+"/events", {
+      eventName: event,
+      commands: commands
+    });
+  }
+
+  isQuantityValid(quantity: string): Observable<{}> {
+    return this.http.post<{}>(this.base+"/quantityValid", {
+      quantity: quantity
+    });
+  }
+}
diff --git a/ui/src/app/services/state.service.ts b/ui/src/app/services/state.service.ts
index d422a8a0e6d..dcce539cf5f 100644
--- a/ui/src/app/services/state.service.ts
+++ b/ui/src/app/services/state.service.ts
@@ -1,30 +1,20 @@
 import { Injectable } from '@angular/core';
 import { BehaviorSubject } from 'rxjs';
-
-import { ResultValue } from './wasm-go.service';
+import { DevfileContent } from '../api-gen';
 
 @Injectable({
   providedIn: 'root'
 })
 export class StateService {
-  private _state = new BehaviorSubject<ResultValue | null>(null);
+  private _state = new BehaviorSubject<DevfileContent | null>(null);
   public state = this._state.asObservable();
 
-  changeDevfileYaml(newValue: ResultValue) {
-    localStorage.setItem("devfile", newValue.content);
+  changeDevfileYaml(newValue: DevfileContent) {
     this._state.next(newValue);
   }
 
-  resetDevfile() {
-    localStorage.removeItem('devfile');
-  }
-
-  getDevfile(): string | null {
-    return localStorage.getItem("devfile");
-  }
-
   getDragAndDropEnabled(): boolean {
     return localStorage.getItem("dragAndDropEnabled") == "true";
   }
diff --git a/ui/src/app/services/wasm-go.service.spec.ts b/ui/src/app/services/wasm-go.service.spec.ts
deleted file mode 100644
index 2d0f1d68b25..00000000000
--- a/ui/src/app/services/wasm-go.service.spec.ts
+++ /dev/null
@@
-1,16 +0,0 @@ -import { TestBed } from '@angular/core/testing'; - -import { WasmGoService } from './wasm-go.service'; - -describe('WasmGoService', () => { - let service: WasmGoService; - - beforeEach(() => { - TestBed.configureTestingModule({}); - service = TestBed.inject(WasmGoService); - }); - - it('should be created', () => { - expect(service).toBeTruthy(); - }); -}); diff --git a/ui/src/app/services/wasm-go.service.ts b/ui/src/app/services/wasm-go.service.ts deleted file mode 100644 index 3bdec349585..00000000000 --- a/ui/src/app/services/wasm-go.service.ts +++ /dev/null @@ -1,241 +0,0 @@ -import { Injectable } from '@angular/core'; - -type ChartResult = { - err: string; - value: any; -}; - -type Result = { - err: string; - value: ResultValue; -}; - -export type ResultValue = { - content: string; - metadata: Metadata; - commands: Command[]; - events: Events; - containers: Container[]; - images: Image[]; - resources: ClusterResource[]; -}; - -export type Metadata = { - name: string | null; - version: string | null; - displayName: string | null; - description: string | null; - tags: string | null; - architectures: string | null; - icon: string | null; - globalMemoryLimit: string | null; - projectType: string | null; - language: string | null; - website: string | null; - provider: string | null; - supportUrl: string | null; -}; - -export type Command = { - name: string; - group: string; - default: boolean; - type: "exec" | "apply" | "image" | "composite"; - exec: ExecCommand | undefined; - apply: ApplyCommand | undefined; - image: ImageCommand | undefined; - composite: CompositeCommand | undefined; -}; - -export type Events = { - preStart: string[]; - postStart: string[]; - preStop: string[]; - postStop: string[]; -}; - -export type ExecCommand = { - component: string; - commandLine: string; - workingDir: string; - hotReloadCapable: boolean; -}; - -export type ApplyCommand = { - component: string; -}; - -export type ImageCommand = { - component: string; -}; - -export type CompositeCommand = { - commands: string[]; - parallel: boolean; -}; - -export type Container = { - name: string; - image: string; - command: string[]; - args: string[]; - memoryRequest: string; - memoryLimit: string; - cpuRequest: string; - cpuLimit: string; -}; - -export type Image = { - name: string; - imageName: string; - args: string[]; - buildContext: string; - rootRequired: boolean; - uri: string; -}; - -export type ClusterResource = { - name: string; - inlined: string; - uri: string; -}; - -declare const addContainer: (name: string, image: string, command: string[], args: string[], memReq: string, memLimit: string, cpuReq: string, cpuLimit: string) => Result; -declare const addImage: (name: string, imageName: string, args: string[], buildContext: string, rootRequired: boolean, uri: string) => Result; -declare const addResource: (name: string, inlined: string, uri: string) => Result; -declare const addExecCommand: (name: string, component: string, commmandLine: string, workingDir: string, hotReloadCapable: boolean) => Result; -declare const addApplyCommand: (name: string, component: string) => Result; -declare const addCompositeCommand: (name: string, parallel: boolean, commands: string[]) => Result; -declare const getFlowChart: () => ChartResult; -declare const setDevfileContent: (devfile: string) => Result; -declare const setMetadata: (metadata: Metadata) => Result; -declare const moveCommand: (previousKind: string, newKind: string, previousIndex: number, newIndex: number) => Result; -declare const 
setDefaultCommand: (command: string, group: string) => Result; -declare const unsetDefaultCommand: (command: string) => Result; -declare const deleteCommand: (command: string) => Result; -declare const deleteContainer: (container: string) => Result; -declare const deleteImage: (image: string) => Result; -declare const deleteResource: (resource: string) => Result; -declare const updateEvents: (event: string, commands: string[]) => Result; -declare const isQuantityValid: (quantity: string) => Boolean; - -@Injectable({ - providedIn: 'root' -}) -// WasmGoService uses the wasm module. -// The module manages a single instance of a Devfile -export class WasmGoService { - - addContainer(container: Container): Result { - return addContainer( - container.name, - container.image, - container.command, - container.args, - container.memoryRequest, - container.memoryLimit, - container.cpuRequest, - container.cpuLimit, - ); - } - - addImage(image: Image): Result { - return addImage( - image.name, - image.imageName, - image.args, - image.buildContext, - image.rootRequired, - image.uri, - ); - } - - addResource(resource: ClusterResource): Result { - return addResource( - resource.name, - resource.inlined, - resource.uri, - ); - } - - addExecCommand(name: string, cmd: ExecCommand): Result { - return addExecCommand( - name, - cmd.component, - cmd.commandLine, - cmd.workingDir, - cmd.hotReloadCapable, - ); - } - - addApplyCommand(name: string, cmd: ApplyCommand): Result { - return addApplyCommand( - name, - cmd.component, - ); - } - - addCompositeCommand(name: string, cmd: CompositeCommand): Result { - return addCompositeCommand( - name, - cmd.parallel, - cmd.commands, - ); - } - - // getFlowChart calls the wasm module to get the lifecycle of the Devfile in mermaid chart format - getFlowChart(): string { - const result = getFlowChart(); - return result.value; - } - - // setDevfileContent calls the wasm module to reset the content of the Devfile - setDevfileContent(devfile: string): Result { - const result = setDevfileContent(devfile); - return result; - } - - setMetadata(metadata: Metadata): Result { - return setMetadata(metadata); - } - - moveCommand(previousKind: string, newKind: string, previousIndex: number, newIndex: number): Result { - return moveCommand(previousKind, newKind, previousIndex, newIndex); - } - - setDefaultCommand(command: string, group: string): Result { - return setDefaultCommand(command, group); - } - - unsetDefaultCommand(command: string): Result { - return unsetDefaultCommand(command); - } - - deleteCommand(command: string): Result { - const result = deleteCommand(command); - return result; - } - - deleteContainer(container: string): Result { - const result = deleteContainer(container); - return result; - } - - deleteImage(image: string): Result { - const result = deleteImage(image); - return result; - } - - deleteResource(resource: string): Result { - const result = deleteResource(resource); - return result; - } - - updateEvents(event: "preStart"|"postStart"|"preStop"|"postStop", commands: string[]): Result { - return updateEvents(event, commands); - } - - isQuantityValid(quantity: string): Boolean { - return isQuantityValid(quantity); - } -} diff --git a/ui/src/app/tabs/commands/commands.component.ts b/ui/src/app/tabs/commands/commands.component.ts index 1a71ea8e5d7..78db30d6d6f 100644 --- a/ui/src/app/tabs/commands/commands.component.ts +++ b/ui/src/app/tabs/commands/commands.component.ts @@ -1,7 +1,8 @@ import { CdkDragDrop } from '@angular/cdk/drag-drop'; import { Component } from 
'@angular/core'; import { StateService } from 'src/app/services/state.service'; -import { Command, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Command } from 'src/app/api-gen'; @Component({ selector: 'app-commands', @@ -19,7 +20,7 @@ export class CommandsComponent { constructor( private state: StateService, - private wasm: WasmGoService, + private devstate: DevstateService, ) { this.enableDragAndDrop = this.state.getDragAndDropEnabled(); } @@ -91,12 +92,15 @@ export class CommandsComponent { } moveCommand(previousKind: string, newKind: string, previousIndex: number, newIndex: number) { - const result = this.wasm.moveCommand(previousKind, newKind, previousIndex, newIndex); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.moveCommand(previousKind, newKind, previousIndex, newIndex); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } enableDragAndDropChange() { diff --git a/ui/src/app/tabs/containers/containers.component.html b/ui/src/app/tabs/containers/containers.component.html index 84edb26f54e..6d7b5ba1724 100644 --- a/ui/src/app/tabs/containers/containers.component.html +++ b/ui/src/app/tabs/containers/containers.component.html @@ -10,27 +10,27 @@ Image: {{container.image}} - + Command: {{container.command.join(" ")}} - + Args: {{container.args.join(" ")}} - + Memory Request: {{container.memoryRequest}} - + Memory Limit: {{container.memoryLimit}} - + CPU Request: {{container.cpuRequest}} - + CPU Limit: {{container.cpuLimit}} diff --git a/ui/src/app/tabs/containers/containers.component.ts b/ui/src/app/tabs/containers/containers.component.ts index 7b658189e5a..7da87f122cd 100644 --- a/ui/src/app/tabs/containers/containers.component.ts +++ b/ui/src/app/tabs/containers/containers.component.ts @@ -1,6 +1,7 @@ import { Component, OnInit } from '@angular/core'; import { StateService } from 'src/app/services/state.service'; -import { Container, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Container } from 'src/app/api-gen'; @Component({ selector: 'app-containers', @@ -14,7 +15,7 @@ export class ContainersComponent implements OnInit { constructor( private state: StateService, - private wasm: WasmGoService, + private devstate: DevstateService, ) {} ngOnInit() { @@ -41,22 +42,28 @@ export class ContainersComponent implements OnInit { delete(name: string) { if(confirm('You will delete the container "'+name+'". 
Continue?')) { - const result = this.wasm.deleteContainer(name); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.deleteContainer(name); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } } onCreated(container: Container) { - const result = this.wasm.addContainer(container); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.addContainer(container); + result.subscribe({ + next: value => { + this.state.changeDevfileYaml(value); + }, + error: error => { + alert(error.error.message); + } + }); } scrollToBottom() { diff --git a/ui/src/app/tabs/events/events.component.ts b/ui/src/app/tabs/events/events.component.ts index ec55e9f80f7..3391c46a0e3 100644 --- a/ui/src/app/tabs/events/events.component.ts +++ b/ui/src/app/tabs/events/events.component.ts @@ -1,6 +1,7 @@ import { Component } from '@angular/core'; import { StateService } from 'src/app/services/state.service'; -import { Events, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Events } from 'src/app/api-gen'; @Component({ selector: 'app-events', @@ -14,22 +15,25 @@ export class EventsComponent { constructor( private state: StateService, - private wasm: WasmGoService, + private devstate: DevstateService, ) {} ngOnInit() { this.state.state.subscribe(async newContent => { this.events = newContent?.events; - this.allCommands = newContent?.commands.map(c => c.name); + this.allCommands = newContent?.commands?.map(c => c.name); }); } onUpdate(event: "preStart" | "postStart" | "preStop" | "postStop", commands: string[]) { - const result = this.wasm.updateEvents(event, commands); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.updateEvents(event, commands); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } } diff --git a/ui/src/app/tabs/images/images.component.html b/ui/src/app/tabs/images/images.component.html index 852f3748893..7fe06f55cad 100644 --- a/ui/src/app/tabs/images/images.component.html +++ b/ui/src/app/tabs/images/images.component.html @@ -14,7 +14,7 @@ Dockerfile URI: {{image.uri}} - + Build Args: {{image.args}} diff --git a/ui/src/app/tabs/images/images.component.ts b/ui/src/app/tabs/images/images.component.ts index 46e1f2b57d5..66e7a34834f 100644 --- a/ui/src/app/tabs/images/images.component.ts +++ b/ui/src/app/tabs/images/images.component.ts @@ -1,6 +1,7 @@ import { Component, OnInit } from '@angular/core'; import { StateService } from 'src/app/services/state.service'; -import { Image, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Image } from 'src/app/api-gen'; @Component({ selector: 'app-images', @@ -14,7 +15,7 @@ export class ImagesComponent implements OnInit { constructor( private state: StateService, - private wasm: WasmGoService, + private devstate: DevstateService, ) {} ngOnInit() { @@ -41,22 +42,28 @@ export class ImagesComponent implements OnInit { delete(name: string) { if(confirm('You will delete the image "'+name+'". 
Continue?')) { - const result = this.wasm.deleteImage(name); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.deleteImage(name); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } } onCreated(image: Image) { - const result = this.wasm.addImage(image); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.addImage(image); + result.subscribe({ + next: value => { + this.state.changeDevfileYaml(value); + }, + error: error => { + alert(error.error.message); + } + }); } scrollToBottom() { diff --git a/ui/src/app/tabs/resources/resources.component.ts b/ui/src/app/tabs/resources/resources.component.ts index 1eba4421d3e..abbd301a81c 100644 --- a/ui/src/app/tabs/resources/resources.component.ts +++ b/ui/src/app/tabs/resources/resources.component.ts @@ -1,6 +1,7 @@ import { Component, OnInit } from '@angular/core'; import { StateService } from 'src/app/services/state.service'; -import { ClusterResource, WasmGoService } from 'src/app/services/wasm-go.service'; +import { DevstateService } from 'src/app/services/devstate.service'; +import { Resource } from 'src/app/api-gen'; @Component({ selector: 'app-resources', @@ -10,11 +11,11 @@ import { ClusterResource, WasmGoService } from 'src/app/services/wasm-go.service export class ResourcesComponent implements OnInit { forceDisplayAdd: boolean = false; - resources: ClusterResource[] | undefined = []; + resources: Resource[] | undefined = []; constructor( private state: StateService, - private wasm: WasmGoService, + private devstate: DevstateService, ) {} ngOnInit() { @@ -41,22 +42,28 @@ export class ResourcesComponent implements OnInit { delete(name: string) { if(confirm('You will delete the resource "'+name+'". Continue?')) { - const result = this.wasm.deleteResource(name); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + const result = this.devstate.deleteResource(name); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } } - onCreated(resource: ClusterResource) { - const result = this.wasm.addResource(resource); - if (result.err != '') { - alert(result.err); - } else { - this.state.changeDevfileYaml(result.value); - } + onCreated(resource: Resource) { + const result = this.devstate.addResource(resource); + result.subscribe({ + next: (value) => { + this.state.changeDevfileYaml(value); + }, + error: (error) => { + alert(error.error.message); + } + }); } scrollToBottom() { diff --git a/ui/src/assets/devfile.6ccdbea7789422ba86e8b08707f15f66.wasm b/ui/src/assets/devfile.6ccdbea7789422ba86e8b08707f15f66.wasm deleted file mode 100755 index 5a85044009b..00000000000 Binary files a/ui/src/assets/devfile.6ccdbea7789422ba86e8b08707f15f66.wasm and /dev/null differ diff --git a/ui/src/assets/wasm_exec.js b/ui/src/assets/wasm_exec.js deleted file mode 100644 index 9ce6a20c3ff..00000000000 --- a/ui/src/assets/wasm_exec.js +++ /dev/null @@ -1,554 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
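
// wasm_exec.js is Go's stock browser shim; the UI needed it only while the
// Devfile logic ran as a WebAssembly module, and with the devstate REST API
// replacing that module the shim and the .wasm asset above are deleted.
// For context, the removed bootstrap amounted to the standard Go/WebAssembly
// setup, sketched here (the asset file name is illustrative):
//
//   declare const Go: any; // constructor provided by wasm_exec.js
//   const go = new Go();
//   WebAssembly.instantiateStreaming(fetch('assets/devfile.wasm'), go.importObject)
//     .then(({ instance }) => go.run(instance));
//   // after which addContainer, setMetadata, etc. were exposed as globals,
//   // as declared in the deleted wasm-go.service.ts above
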
- -"use strict"; - -(() => { - const enosys = () => { - const err = new Error("not implemented"); - err.code = "ENOSYS"; - return err; - }; - - if (!globalThis.fs) { - let outputBuf = ""; - globalThis.fs = { - constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused - writeSync(fd, buf) { - outputBuf += decoder.decode(buf); - const nl = outputBuf.lastIndexOf("\n"); - if (nl != -1) { - console.log(outputBuf.substr(0, nl)); - outputBuf = outputBuf.substr(nl + 1); - } - return buf.length; - }, - write(fd, buf, offset, length, position, callback) { - if (offset !== 0 || length !== buf.length || position !== null) { - callback(enosys()); - return; - } - const n = this.writeSync(fd, buf); - callback(null, n); - }, - chmod(path, mode, callback) { callback(enosys()); }, - chown(path, uid, gid, callback) { callback(enosys()); }, - close(fd, callback) { callback(enosys()); }, - fchmod(fd, mode, callback) { callback(enosys()); }, - fchown(fd, uid, gid, callback) { callback(enosys()); }, - fstat(fd, callback) { callback(enosys()); }, - fsync(fd, callback) { callback(null); }, - ftruncate(fd, length, callback) { callback(enosys()); }, - lchown(path, uid, gid, callback) { callback(enosys()); }, - link(path, link, callback) { callback(enosys()); }, - lstat(path, callback) { callback(enosys()); }, - mkdir(path, perm, callback) { callback(enosys()); }, - open(path, flags, mode, callback) { callback(enosys()); }, - read(fd, buffer, offset, length, position, callback) { callback(enosys()); }, - readdir(path, callback) { callback(enosys()); }, - readlink(path, callback) { callback(enosys()); }, - rename(from, to, callback) { callback(enosys()); }, - rmdir(path, callback) { callback(enosys()); }, - stat(path, callback) { callback(enosys()); }, - symlink(path, link, callback) { callback(enosys()); }, - truncate(path, length, callback) { callback(enosys()); }, - unlink(path, callback) { callback(enosys()); }, - utimes(path, atime, mtime, callback) { callback(enosys()); }, - }; - } - - if (!globalThis.process) { - globalThis.process = { - getuid() { return -1; }, - getgid() { return -1; }, - geteuid() { return -1; }, - getegid() { return -1; }, - getgroups() { throw enosys(); }, - pid: -1, - ppid: -1, - umask() { throw enosys(); }, - cwd() { throw enosys(); }, - chdir() { throw enosys(); }, - } - } - - if (!globalThis.crypto) { - throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)"); - } - - if (!globalThis.performance) { - throw new Error("globalThis.performance is not available, polyfill required (performance.now only)"); - } - - if (!globalThis.TextEncoder) { - throw new Error("globalThis.TextEncoder is not available, polyfill required"); - } - - if (!globalThis.TextDecoder) { - throw new Error("globalThis.TextDecoder is not available, polyfill required"); - } - - const encoder = new TextEncoder("utf-8"); - const decoder = new TextDecoder("utf-8"); - - globalThis.Go = class { - constructor() { - this.argv = ["js"]; - this.env = {}; - this.exit = (code) => { - if (code !== 0) { - console.warn("exit code:", code); - } - }; - this._exitPromise = new Promise((resolve) => { - this._resolveExitPromise = resolve; - }); - this._pendingEvent = null; - this._scheduledTimeouts = new Map(); - this._nextCallbackTimeoutID = 1; - - const setInt64 = (addr, v) => { - this.mem.setUint32(addr + 0, v, true); - this.mem.setUint32(addr + 4, Math.floor(v / 4294967296), true); - } - - const getInt64 = (addr) => { - const low = 
this.mem.getUint32(addr + 0, true); - const high = this.mem.getInt32(addr + 4, true); - return low + high * 4294967296; - } - - const loadValue = (addr) => { - const f = this.mem.getFloat64(addr, true); - if (f === 0) { - return undefined; - } - if (!isNaN(f)) { - return f; - } - - const id = this.mem.getUint32(addr, true); - return this._values[id]; - } - - const storeValue = (addr, v) => { - const nanHead = 0x7FF80000; - - if (typeof v === "number" && v !== 0) { - if (isNaN(v)) { - this.mem.setUint32(addr + 4, nanHead, true); - this.mem.setUint32(addr, 0, true); - return; - } - this.mem.setFloat64(addr, v, true); - return; - } - - if (v === undefined) { - this.mem.setFloat64(addr, 0, true); - return; - } - - let id = this._ids.get(v); - if (id === undefined) { - id = this._idPool.pop(); - if (id === undefined) { - id = this._values.length; - } - this._values[id] = v; - this._goRefCounts[id] = 0; - this._ids.set(v, id); - } - this._goRefCounts[id]++; - let typeFlag = 0; - switch (typeof v) { - case "object": - if (v !== null) { - typeFlag = 1; - } - break; - case "string": - typeFlag = 2; - break; - case "symbol": - typeFlag = 3; - break; - case "function": - typeFlag = 4; - break; - } - this.mem.setUint32(addr + 4, nanHead | typeFlag, true); - this.mem.setUint32(addr, id, true); - } - - const loadSlice = (addr) => { - const array = getInt64(addr + 0); - const len = getInt64(addr + 8); - return new Uint8Array(this._inst.exports.mem.buffer, array, len); - } - - const loadSliceOfValues = (addr) => { - const array = getInt64(addr + 0); - const len = getInt64(addr + 8); - const a = new Array(len); - for (let i = 0; i < len; i++) { - a[i] = loadValue(array + i * 8); - } - return a; - } - - const loadString = (addr) => { - const saddr = getInt64(addr + 0); - const len = getInt64(addr + 8); - return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len)); - } - - const timeOrigin = Date.now() - performance.now(); - this.importObject = { - go: { - // Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters) - // may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported - // function. A goroutine can switch to a new stack if the current stack is too small (see morestack function). - // This changes the SP, thus we have to update the SP used by the imported function. 
- - // func wasmExit(code int32) - "runtime.wasmExit": (sp) => { - sp >>>= 0; - const code = this.mem.getInt32(sp + 8, true); - this.exited = true; - delete this._inst; - delete this._values; - delete this._goRefCounts; - delete this._ids; - delete this._idPool; - this.exit(code); - }, - - // func wasmWrite(fd uintptr, p unsafe.Pointer, n int32) - "runtime.wasmWrite": (sp) => { - sp >>>= 0; - const fd = getInt64(sp + 8); - const p = getInt64(sp + 16); - const n = this.mem.getInt32(sp + 24, true); - fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n)); - }, - - // func resetMemoryDataView() - "runtime.resetMemoryDataView": (sp) => { - sp >>>= 0; - this.mem = new DataView(this._inst.exports.mem.buffer); - }, - - // func nanotime1() int64 - "runtime.nanotime1": (sp) => { - sp >>>= 0; - setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000); - }, - - // func walltime() (sec int64, nsec int32) - "runtime.walltime": (sp) => { - sp >>>= 0; - const msec = (new Date).getTime(); - setInt64(sp + 8, msec / 1000); - this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true); - }, - - // func scheduleTimeoutEvent(delay int64) int32 - "runtime.scheduleTimeoutEvent": (sp) => { - sp >>>= 0; - const id = this._nextCallbackTimeoutID; - this._nextCallbackTimeoutID++; - this._scheduledTimeouts.set(id, setTimeout( - () => { - this._resume(); - while (this._scheduledTimeouts.has(id)) { - // for some reason Go failed to register the timeout event, log and try again - // (temporary workaround for https://github.com/golang/go/issues/28975) - console.warn("scheduleTimeoutEvent: missed timeout event"); - this._resume(); - } - }, - getInt64(sp + 8) + 1, // setTimeout has been seen to fire up to 1 millisecond early - )); - this.mem.setInt32(sp + 16, id, true); - }, - - // func clearTimeoutEvent(id int32) - "runtime.clearTimeoutEvent": (sp) => { - sp >>>= 0; - const id = this.mem.getInt32(sp + 8, true); - clearTimeout(this._scheduledTimeouts.get(id)); - this._scheduledTimeouts.delete(id); - }, - - // func getRandomData(r []byte) - "runtime.getRandomData": (sp) => { - sp >>>= 0; - crypto.getRandomValues(loadSlice(sp + 8)); - }, - - // func finalizeRef(v ref) - "syscall/js.finalizeRef": (sp) => { - sp >>>= 0; - const id = this.mem.getUint32(sp + 8, true); - this._goRefCounts[id]--; - if (this._goRefCounts[id] === 0) { - const v = this._values[id]; - this._values[id] = null; - this._ids.delete(v); - this._idPool.push(id); - } - }, - - // func stringVal(value string) ref - "syscall/js.stringVal": (sp) => { - sp >>>= 0; - storeValue(sp + 24, loadString(sp + 8)); - }, - - // func valueGet(v ref, p string) ref - "syscall/js.valueGet": (sp) => { - sp >>>= 0; - const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16)); - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 32, result); - }, - - // func valueSet(v ref, p string, x ref) - "syscall/js.valueSet": (sp) => { - sp >>>= 0; - Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32)); - }, - - // func valueDelete(v ref, p string) - "syscall/js.valueDelete": (sp) => { - sp >>>= 0; - Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16)); - }, - - // func valueIndex(v ref, i int) ref - "syscall/js.valueIndex": (sp) => { - sp >>>= 0; - storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16))); - }, - - // valueSetIndex(v ref, i int, x ref) - "syscall/js.valueSetIndex": (sp) => { - sp >>>= 0; - Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24)); - }, - - // func 
valueCall(v ref, m string, args []ref) (ref, bool) - "syscall/js.valueCall": (sp) => { - sp >>>= 0; - try { - const v = loadValue(sp + 8); - const m = Reflect.get(v, loadString(sp + 16)); - const args = loadSliceOfValues(sp + 32); - const result = Reflect.apply(m, v, args); - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 56, result); - this.mem.setUint8(sp + 64, 1); - } catch (err) { - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 56, err); - this.mem.setUint8(sp + 64, 0); - } - }, - - // func valueInvoke(v ref, args []ref) (ref, bool) - "syscall/js.valueInvoke": (sp) => { - sp >>>= 0; - try { - const v = loadValue(sp + 8); - const args = loadSliceOfValues(sp + 16); - const result = Reflect.apply(v, undefined, args); - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 40, result); - this.mem.setUint8(sp + 48, 1); - } catch (err) { - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 40, err); - this.mem.setUint8(sp + 48, 0); - } - }, - - // func valueNew(v ref, args []ref) (ref, bool) - "syscall/js.valueNew": (sp) => { - sp >>>= 0; - try { - const v = loadValue(sp + 8); - const args = loadSliceOfValues(sp + 16); - const result = Reflect.construct(v, args); - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 40, result); - this.mem.setUint8(sp + 48, 1); - } catch (err) { - sp = this._inst.exports.getsp() >>> 0; // see comment above - storeValue(sp + 40, err); - this.mem.setUint8(sp + 48, 0); - } - }, - - // func valueLength(v ref) int - "syscall/js.valueLength": (sp) => { - sp >>>= 0; - setInt64(sp + 16, parseInt(loadValue(sp + 8).length)); - }, - - // valuePrepareString(v ref) (ref, int) - "syscall/js.valuePrepareString": (sp) => { - sp >>>= 0; - const str = encoder.encode(String(loadValue(sp + 8))); - storeValue(sp + 16, str); - setInt64(sp + 24, str.length); - }, - - // valueLoadString(v ref, b []byte) - "syscall/js.valueLoadString": (sp) => { - sp >>>= 0; - const str = loadValue(sp + 8); - loadSlice(sp + 16).set(str); - }, - - // func valueInstanceOf(v ref, t ref) bool - "syscall/js.valueInstanceOf": (sp) => { - sp >>>= 0; - this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 
1 : 0); - }, - - // func copyBytesToGo(dst []byte, src ref) (int, bool) - "syscall/js.copyBytesToGo": (sp) => { - sp >>>= 0; - const dst = loadSlice(sp + 8); - const src = loadValue(sp + 32); - if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) { - this.mem.setUint8(sp + 48, 0); - return; - } - const toCopy = src.subarray(0, dst.length); - dst.set(toCopy); - setInt64(sp + 40, toCopy.length); - this.mem.setUint8(sp + 48, 1); - }, - - // func copyBytesToJS(dst ref, src []byte) (int, bool) - "syscall/js.copyBytesToJS": (sp) => { - sp >>>= 0; - const dst = loadValue(sp + 8); - const src = loadSlice(sp + 16); - if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) { - this.mem.setUint8(sp + 48, 0); - return; - } - const toCopy = src.subarray(0, dst.length); - dst.set(toCopy); - setInt64(sp + 40, toCopy.length); - this.mem.setUint8(sp + 48, 1); - }, - - "debug": (value) => { - console.log(value); - }, - } - }; - } - - async run(instance) { - if (!(instance instanceof WebAssembly.Instance)) { - throw new Error("Go.run: WebAssembly.Instance expected"); - } - this._inst = instance; - this.mem = new DataView(this._inst.exports.mem.buffer); - this._values = [ // JS values that Go currently has references to, indexed by reference id - NaN, - 0, - null, - true, - false, - globalThis, - this, - ]; - this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id - this._ids = new Map([ // mapping from JS values to reference ids - [0, 1], - [null, 2], - [true, 3], - [false, 4], - [globalThis, 5], - [this, 6], - ]); - this._idPool = []; // unused ids that have been garbage collected - this.exited = false; // whether the Go program has exited - - // Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory. - let offset = 4096; - - const strPtr = (str) => { - const ptr = offset; - const bytes = encoder.encode(str + "\0"); - new Uint8Array(this.mem.buffer, offset, bytes.length).set(bytes); - offset += bytes.length; - if (offset % 8 !== 0) { - offset += 8 - (offset % 8); - } - return ptr; - }; - - const argc = this.argv.length; - - const argvPtrs = []; - this.argv.forEach((arg) => { - argvPtrs.push(strPtr(arg)); - }); - argvPtrs.push(0); - - const keys = Object.keys(this.env).sort(); - keys.forEach((key) => { - argvPtrs.push(strPtr(`${key}=${this.env[key]}`)); - }); - argvPtrs.push(0); - - const argv = offset; - argvPtrs.forEach((ptr) => { - this.mem.setUint32(offset, ptr, true); - this.mem.setUint32(offset + 4, 0, true); - offset += 8; - }); - - // The linker guarantees global data starts from at least wasmMinDataAddr. - // Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr. 
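
// A worked example of the alignment logic in strPtr above: encoding "js\0"
// writes 3 bytes at offset 4096, advancing offset to 4099; since
// 4099 % 8 == 3, the offset is bumped by 5 to 4104, so every string pointer
// handed to the argv/env layout below stays 8-byte aligned.
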
- const wasmMinDataAddr = 4096 + 8192; - if (offset >= wasmMinDataAddr) { - throw new Error("total length of command line and environment variables exceeds limit"); - } - - this._inst.exports.run(argc, argv); - if (this.exited) { - this._resolveExitPromise(); - } - await this._exitPromise; - } - - _resume() { - if (this.exited) { - throw new Error("Go program has already exited"); - } - this._inst.exports.resume(); - if (this.exited) { - this._resolveExitPromise(); - } - } - - _makeFuncWrapper(id) { - const go = this; - return function () { - const event = { id: id, this: this, args: arguments }; - go._pendingEvent = event; - go._resume(); - return event.result; - }; - } - } -})(); diff --git a/ui/src/proxy.conf.json b/ui/src/proxy.conf.json new file mode 100644 index 00000000000..81345af81b3 --- /dev/null +++ b/ui/src/proxy.conf.json @@ -0,0 +1,6 @@ +{ + "/api/v1": { + "target": "http://localhost:20000", + "secure": false + } +} diff --git a/ui/wasm/go.mod b/ui/wasm/go.mod deleted file mode 100644 index deb0da66fdd..00000000000 --- a/ui/wasm/go.mod +++ /dev/null @@ -1,111 +0,0 @@ -module github.com/feloy/devfile-builder/wasm - -go 1.19 - -require ( - github.com/devfile/api/v2 v2.2.0 - github.com/devfile/library/v2 v2.2.0 - github.com/feloy/devfile-lifecycle v0.0.0-20230515183001-11d088157da2 - github.com/google/go-cmp v0.5.6 - k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 -) - -require ( - github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect - github.com/Heiko-san/mermaidgen v0.0.0-20190623222458-ec72afae378b // indirect - github.com/Microsoft/go-winio v0.5.1 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect - github.com/acomagu/bufpipe v1.0.3 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/containerd/containerd v1.5.9 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/devfile/registry-support/index/generator v0.0.0-20221018203505-df96d34d4273 // indirect - github.com/devfile/registry-support/registry-library v0.0.0-20221018213054-47b3ffaeadba // indirect - github.com/docker/cli v20.10.11+incompatible // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect - github.com/docker/docker v20.10.11+incompatible // indirect - github.com/docker/docker-credential-helpers v0.6.4 // indirect - github.com/docker/go-connections v0.4.0 // indirect - github.com/docker/go-metrics v0.0.1 // indirect - github.com/docker/go-units v0.4.0 // indirect - github.com/emirpasic/gods v1.12.0 // indirect - github.com/evanphx/json-patch v4.11.0+incompatible // indirect - github.com/fatih/color v1.7.0 // indirect - github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/go-git/gcfg v1.5.0 // indirect - github.com/go-git/go-billy/v5 v5.3.1 // indirect - github.com/go-git/go-git/v5 v5.4.2 // indirect - github.com/go-logr/logr v0.4.0 // indirect - github.com/gobwas/glob v0.2.3 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/btree v1.0.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect - github.com/gorilla/mux v1.8.0 // indirect - github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect - github.com/hashicorp/errwrap v1.0.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-version v1.4.0 // 
indirect - github.com/imdario/mergo v0.3.12 // indirect - github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect - github.com/json-iterator/go v1.1.11 // indirect - github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect - github.com/klauspost/compress v1.13.6 // indirect - github.com/mattn/go-colorable v0.1.2 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/moby/locker v1.0.1 // indirect - github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/morikuni/aec v1.0.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.2 // indirect - github.com/openshift/api v0.0.0-20200930075302-db52bc4ef99f // indirect - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect - github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.11.0 // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.26.0 // indirect - github.com/prometheus/procfs v0.6.0 // indirect - github.com/sergi/go-diff v1.1.0 // indirect - github.com/sirupsen/logrus v1.8.1 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/xanzy/ssh-agent v0.3.0 // indirect - github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect - github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xeipuuv/gojsonschema v1.2.0 // indirect - golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect - golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d // indirect - golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 // indirect - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect - golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect - google.golang.org/grpc v1.43.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/warnings.v0 v0.1.2 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.21.3 // indirect - k8s.io/apiextensions-apiserver v0.21.3 // indirect - k8s.io/apimachinery v0.21.3 // indirect - k8s.io/client-go v0.21.3 // indirect - k8s.io/klog v1.0.0 // indirect - k8s.io/klog/v2 v2.8.0 // indirect - k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 // indirect - oras.land/oras-go v1.1.0 // indirect - sigs.k8s.io/controller-runtime v0.9.5 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect -) diff --git a/ui/wasm/go.sum b/ui/wasm/go.sum deleted file mode 100644 index 962eca35959..00000000000 --- a/ui/wasm/go.sum +++ /dev/null @@ -1,1510 +0,0 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -cloud.google.com/go v0.26.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= 
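
The new ui/src/proxy.conf.json forwards /api/v1 requests from the Angular dev server to the odo API server on localhost:20000. During development that file is typically handed to the Angular CLI via its proxy support (the exact wiring may instead live in angular.json's serve options):

  ng serve --proxy-config src/proxy.conf.json
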
-github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Heiko-san/mermaidgen v0.0.0-20190623222458-ec72afae378b h1:vMD9lRM0RSgyh32p9nu7guZq99XBo+m2LTTgih1Jkyc= -github.com/Heiko-san/mermaidgen v0.0.0-20190623222458-ec72afae378b/go.mod h1:wsRHegZ2aKrPucCK/XTSCe3stHT09+U8aAM5AZ0XdKY= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY= -github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.8.6/go.mod 
h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= -github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= -github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM= -github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= -github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod 
h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= -github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= -github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= -github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= -github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= -github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= 
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= -github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups 
v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= -github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.2 h1:mZBclaSgNDfPWtfhj2xJY28LZ9nYIgzB0pwSURPl6JM= -github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4= -github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity 
v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= -github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= -github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= -github.com/containerd/typeurl 
v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= -github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= -github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= -github.com/devfile/api/v2 v2.0.0-20211021164004-dabee4e633ed/go.mod h1:d99eTN6QxgzihOOFyOZA+VpUyD4Q1pYRYHZ/ci9J96Q= -github.com/devfile/api/v2 v2.0.0-20220117162434-6e6e6a8bc14c/go.mod h1:d99eTN6QxgzihOOFyOZA+VpUyD4Q1pYRYHZ/ci9J96Q= -github.com/devfile/api/v2 v2.2.0 h1:3Mwl/dtT508oU4pNt/v4G8vqvjoZqi9LOInXCNwKMoc= -github.com/devfile/api/v2 v2.2.0/go.mod h1:dN7xFrOVG+iPqn4UKGibXLd5oVsdE8XyK9OEb5JL3aI= -github.com/devfile/library v1.2.1-0.20211104222135-49d635cb492f/go.mod h1:uFZZdTuRqA68FVe/JoJHP92CgINyQkyWnM2Qyiim+50= -github.com/devfile/library v1.2.1-0.20220308191614-f0f7e11b17de/go.mod h1:GSPfJaBg0+bBjBHbwBE5aerJLH6tWGQu2q2rHYd9czM= -github.com/devfile/library/v2 v2.0.1/go.mod h1:paJ0PARAVy0br13VpBEQ4fO3rZVDxWtooQ29+23PNBk= -github.com/devfile/library/v2 v2.2.0 h1:M12ui+ONi4oU0ynJwhWvPs7jaNX6KsUI5gBRyAVQQCs= -github.com/devfile/library/v2 v2.2.0/go.mod h1:k0dGRv2oJtQkQaCcNSwku2OBM5Xu6fGl1Jsi2fsu/Ps= -github.com/devfile/registry-support/index/generator v0.0.0-20220222194908-7a90a4214f3e/go.mod h1:iRPBxs+ZjfLEduVXpCCIOzdD2588Zv9OCs/CcXMcCCY= -github.com/devfile/registry-support/index/generator v0.0.0-20220527155645-8328a8a883be/go.mod h1:1fyDJL+fPHtcrYA6yjSVWeLmXmjCNth0d5Rq1rvtryc= -github.com/devfile/registry-support/index/generator v0.0.0-20221018203505-df96d34d4273 h1:DXENQSRTEDsk9com38njPg5511DD12HPIgzyFUErnpM= -github.com/devfile/registry-support/index/generator v0.0.0-20221018203505-df96d34d4273/go.mod h1:ZJnaSLjTKCvGJhWmYgQoQ1O3g78qBe4Va6ZugLmi4dE= -github.com/devfile/registry-support/registry-library v0.0.0-20220627163229-4aa39fcb0c0a/go.mod h1:kmEjH5oO465vh36kcYdZLYeG8edVD6N/ZgzyLs1x7qs= -github.com/devfile/registry-support/registry-library v0.0.0-20221018213054-47b3ffaeadba h1:7Ag9guD3qOSzg3PcbSMqTVHDjqZojqb5JoIIonjRjDQ= -github.com/devfile/registry-support/registry-library v0.0.0-20221018213054-47b3ffaeadba/go.mod h1:NOtmnbozFn15w/DPD/Urc+KDlNRP4JH5m+KC5GZoAWA= -github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
-github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684 h1:DBZ2sN7CK6dgvHVpQsQj4sRMCbWTmd17l+5SUCjnQSY= -github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/cli v20.10.11+incompatible h1:tXU1ezXcruZQRrMP8RN2z9N91h+6egZTS1gsPsKantc= -github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo= -github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o= -github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= -github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= -github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= -github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= -github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= -github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= -github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/feloy/devfile-lifecycle v0.0.0-20230313085338-2c24a8e5b6b1 h1:WnvGh0aavOMsFz3uNIzdiNUcc16r7sU5Slvn4Qux7x4= -github.com/feloy/devfile-lifecycle v0.0.0-20230313085338-2c24a8e5b6b1/go.mod h1:iATIK0fjTcnDmYCKfefNd7Fiw0TEn3V0Q4nmNNuYvvY= -github.com/feloy/devfile-lifecycle v0.0.0-20230515183001-11d088157da2 h1:yu3RkIH8lmDrtC0HaTEc5weAWcyCu0TOB3Gxo8jqNxw= -github.com/feloy/devfile-lifecycle v0.0.0-20230515183001-11d088157da2/go.mod h1:iATIK0fjTcnDmYCKfefNd7Fiw0TEn3V0Q4nmNNuYvvY= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod 
h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= -github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod 
h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= -github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= 
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
-github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
-github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
-github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
-github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
-github.com/lucasjones/reggen v0.0.0-20200904144131-37ba4fa293bb/go.mod h1:5ELEyG+X8f+meRWHuqUOewBOhvHkl7M76pdGEansxW4=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
-github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
-github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
-github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
-github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
-github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
-github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
-github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
-github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
-github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
-github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
-github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
-github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
-github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
-github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
-github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
-github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
-github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
-github.com/openshift/api v0.0.0-20200930075302-db52bc4ef99f h1:/msM59v15x4DaAZeJnQwkVsCGTEa1mx+nSSMehZVAHs=
-github.com/openshift/api v0.0.0-20200930075302-db52bc4ef99f/go.mod h1:Si/I9UGeRR3qzg01YWPmtlr0GeGk2fnuggXJRmjAZ6U=
-github.com/openshift/build-machinery-go v0.0.0-20200819073603-48aa266c95f7/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
-github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4=
-github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
-github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
-github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
-github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
-github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
-github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
-github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
-github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
-github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
-github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
-github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
-github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
-github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
-github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
-go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
-go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=
-go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI=
-golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
-golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
-golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
-golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
-golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
-golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
-golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
-golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
-golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
-golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d h1:62NvYBuaanGXR2ZOfwDFkhhl6X1DUgf8qg3GuQvxZsE=
-golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=
-golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
-golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
-golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
-golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
-golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
-google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
-google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
-google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
-google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
-google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
-google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
-google.golang.org/api v0.28.0/go.mod
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 
h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod 
h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= -k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= -k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= -k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= -k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= -k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= -k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= -k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= -k8s.io/client-go v0.21.3/go.mod 
h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= -k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= -k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= -k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= -k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= -k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -oras.land/oras-go v1.1.0 h1:tfWM1RT7PzUwWphqHU6ptPU3ZhwVnSw/9nEGf519rYg= -oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.5 
h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc=
-sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
-sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/ui/wasm/main.go b/ui/wasm/main.go
deleted file mode 100644
index 947cce2eb1f..00000000000
--- a/ui/wasm/main.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package main
-
-import (
-	"syscall/js"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/exports"
-)
-
-func setFreshDevfile() {
-	content := `schemaVersion: 2.2.0`
-	exports.SetDevfileContent(content)
-}
-
-func main() {
-	setFreshDevfile()
-
-	js.Global().Set("addContainer", js.FuncOf(exports.AddContainerWrapper))
-	js.Global().Set("addImage", js.FuncOf(exports.AddImageWrapper))
-	js.Global().Set("addResource", js.FuncOf(exports.AddResourceWrapper))
-
-	js.Global().Set("addExecCommand", js.FuncOf(exports.AddExecCommandWrapper))
-	js.Global().Set("addApplyCommand", js.FuncOf(exports.AddApplyCommandWrapper))
-	js.Global().Set("addCompositeCommand", js.FuncOf(exports.AddCompositeCommandWrapper))
-
-	js.Global().Set("getFlowChart", js.FuncOf(exports.GetFlowChartWrapper))
-	js.Global().Set("setDevfileContent", js.FuncOf(exports.SetDevfileContentWrapper))
-
-	js.Global().Set("setMetadata", js.FuncOf(exports.SetMetadataWrapper))
-
-	js.Global().Set("moveCommand", js.FuncOf(exports.MoveCommandWrapper))
-
-	js.Global().Set("setDefaultCommand", js.FuncOf(exports.SetDefaultCommandWrapper))
-	js.Global().Set("unsetDefaultCommand", js.FuncOf(exports.UnsetDefaultCommandWrapper))
-
-	js.Global().Set("deleteCommand", js.FuncOf(exports.DeleteCommandWrapper))
-	js.Global().Set("deleteContainer", js.FuncOf(exports.DeleteContainerWrapper))
-	js.Global().Set("deleteImage", js.FuncOf(exports.DeleteImageWrapper))
-	js.Global().Set("deleteResource", js.FuncOf(exports.DeleteResourceWrapper))
-
-	js.Global().Set("updateEvents", js.FuncOf(exports.UpdateEventsWrapper))
-
-	js.Global().Set("isQuantityValid", js.FuncOf(exports.IsQuantityValidWrapper))
-	<-make(chan bool)
-}
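The deleted main.go above registers every export with js.Global().Set, and the files deleted below implement those wrappers. They all share one bridge pattern: a js.FuncOf-compatible wrapper converts the js.Value arguments to Go types, calls a typed implementation, and normalizes the (value, error) pair through a single result() helper (see the deleted result.go further down) so the JavaScript side always receives a {value, err} object. A minimal, self-contained sketch of that pattern; the greet function and its wrapper are hypothetical stand-ins:

```go
//go:build js && wasm

package main

import "syscall/js"

// result mirrors the deleted result.go: flatten (value, error) into a map
// that converts cleanly to a JS object.
func result(value interface{}, err error) map[string]interface{} {
	errStr := ""
	if err != nil {
		errStr = err.Error()
	}
	return map[string]interface{}{"value": value, "err": errStr}
}

// greet is a hypothetical typed implementation.
func greet(name string) (string, error) {
	return "hello " + name, nil
}

// greetWrapper adapts greet to the js.FuncOf calling convention.
func greetWrapper(this js.Value, args []js.Value) interface{} {
	return result(greet(args[0].String()))
}

func main() {
	js.Global().Set("greet", js.FuncOf(greetWrapper))
	<-make(chan bool) // block forever so registered callbacks stay alive
}
```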
diff --git a/ui/wasm/pkg/exports/addApplyCommand.go b/ui/wasm/pkg/exports/addApplyCommand.go
deleted file mode 100644
index d15668dd4af..00000000000
--- a/ui/wasm/pkg/exports/addApplyCommand.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddApplyCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		addApplyCommand(args[0].String(), args[1].String()),
-	)
-}
-
-func addApplyCommand(name string, component string) (map[string]interface{}, error) {
-	command := v1alpha2.Command{
-		Id: name,
-		CommandUnion: v1alpha2.CommandUnion{
-			Apply: &v1alpha2.ApplyCommand{
-				Component: component,
-			},
-		},
-	}
-	err := global.Devfile.Data.AddCommands([]v1alpha2.Command{command})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/addCompositeCommand.go b/ui/wasm/pkg/exports/addCompositeCommand.go
deleted file mode 100644
index 21a7f69738c..00000000000
--- a/ui/wasm/pkg/exports/addCompositeCommand.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddCompositeCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		addCompositeCommand(args[0].String(), args[1].Bool(), getStringArray(args[2])),
-	)
-}
-
-func addCompositeCommand(name string, parallel bool, commands []string) (map[string]interface{}, error) {
-	command := v1alpha2.Command{
-		Id: name,
-		CommandUnion: v1alpha2.CommandUnion{
-			Composite: &v1alpha2.CompositeCommand{
-				Parallel: &parallel,
-				Commands: commands,
-			},
-		},
-	}
-	err := global.Devfile.Data.AddCommands([]v1alpha2.Command{command})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/addContainer.go b/ui/wasm/pkg/exports/addContainer.go
deleted file mode 100644
index 6f3cc0a4d60..00000000000
--- a/ui/wasm/pkg/exports/addContainer.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddContainerWrapper(this js.Value, args []js.Value) interface{} {
-	command := getStringArray(args[2])
-	arg := getStringArray(args[3])
-	return result(
-		addContainer(args[0].String(), args[1].String(), command, arg, args[4].String(), args[5].String(), args[6].String(), args[7].String()),
-	)
-}
-
-func addContainer(name string, image string, command []string, args []string, memRequest string, memLimit string, cpuRequest string, cpuLimit string) (map[string]interface{}, error) {
-	container := v1alpha2.Component{
-		Name: name,
-		ComponentUnion: v1alpha2.ComponentUnion{
-			Container: &v1alpha2.ContainerComponent{
-				Container: v1alpha2.Container{
-					Image: image,
-					Command: command,
-					Args: args,
-					MemoryRequest: memRequest,
-					MemoryLimit: memLimit,
-					CpuRequest: cpuRequest,
-					CpuLimit: cpuLimit,
-				},
-			},
-		},
-	}
-	err := global.Devfile.Data.AddComponents([]v1alpha2.Component{container})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
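The devfile API types used throughout these helpers are "union" structs: each Command sets exactly one member of CommandUnion (Exec, Apply, or Composite), and each Component sets exactly one member of ComponentUnion, which is why every deleted helper populates a single branch. A minimal sketch, assuming the devfile/api v1alpha2 types imported above:

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func main() {
	// Exactly one union member is set; populating more than one would
	// make the devfile invalid when validated.
	cmd := v1alpha2.Command{
		Id: "build",
		CommandUnion: v1alpha2.CommandUnion{
			Exec: &v1alpha2.ExecCommand{
				Component:   "runtime",
				CommandLine: "go build ./...",
			},
		},
	}
	// CommandUnion is embedded, so the branch is reachable directly.
	fmt.Println(cmd.Id, cmd.Exec.CommandLine)
}
```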
diff --git a/ui/wasm/pkg/exports/addExecCommand.go b/ui/wasm/pkg/exports/addExecCommand.go
deleted file mode 100644
index 7e4ca03fe0e..00000000000
--- a/ui/wasm/pkg/exports/addExecCommand.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddExecCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		addExecCommand(args[0].String(), args[1].String(), args[2].String(), args[3].String(), args[4].Bool()),
-	)
-}
-
-func addExecCommand(name string, component string, commandLine string, workingDir string, hotReloadCapable bool) (map[string]interface{}, error) {
-	command := v1alpha2.Command{
-		Id: name,
-		CommandUnion: v1alpha2.CommandUnion{
-			Exec: &v1alpha2.ExecCommand{
-				Component: component,
-				CommandLine: commandLine,
-				WorkingDir: workingDir,
-				HotReloadCapable: &hotReloadCapable,
-			},
-		},
-	}
-	err := global.Devfile.Data.AddCommands([]v1alpha2.Command{command})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/addImage.go b/ui/wasm/pkg/exports/addImage.go
deleted file mode 100644
index ba853ad605c..00000000000
--- a/ui/wasm/pkg/exports/addImage.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddImageWrapper(this js.Value, args []js.Value) interface{} {
-	arg := getStringArray(args[2])
-	return result(
-		addImage(args[0].String(), args[1].String(), arg, args[3].String(), args[4].Bool(), args[5].String()),
-	)
-}
-
-func addImage(name string, imageName string, args []string, buildContext string, rootRequired bool, uri string) (map[string]interface{}, error) {
-	container := v1alpha2.Component{
-		Name: name,
-		ComponentUnion: v1alpha2.ComponentUnion{
-			Image: &v1alpha2.ImageComponent{
-				Image: v1alpha2.Image{
-					ImageName: imageName,
-					ImageUnion: v1alpha2.ImageUnion{
-						Dockerfile: &v1alpha2.DockerfileImage{
-							Dockerfile: v1alpha2.Dockerfile{
-								Args: args,
-								BuildContext: buildContext,
-								RootRequired: &rootRequired,
-							},
-							DockerfileSrc: v1alpha2.DockerfileSrc{
-								Uri: uri,
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-	err := global.Devfile.Data.AddComponents([]v1alpha2.Component{container})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/addResource.go b/ui/wasm/pkg/exports/addResource.go
deleted file mode 100644
index 5f19251a64e..00000000000
--- a/ui/wasm/pkg/exports/addResource.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package exports
-
-import (
-	"errors"
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddResourceWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		addResource(args[0].String(), args[1].String(), args[2].String()),
-	)
-}
-
-func addResource(name string, inlined string, uri string) (map[string]interface{}, error) {
-	if inlined != "" && uri != "" {
-		return nil, errors.New("both inlined and uri cannot be set at the same time")
-	}
-	container := v1alpha2.Component{
-		Name: name,
-		ComponentUnion: v1alpha2.ComponentUnion{
-			Kubernetes: &v1alpha2.KubernetesComponent{
-				K8sLikeComponent: v1alpha2.K8sLikeComponent{
-					K8sLikeComponentLocation: v1alpha2.K8sLikeComponentLocation{
-						Inlined: inlined,
-						Uri: uri,
-					},
-				},
-			},
-		},
-	}
-	err := global.Devfile.Data.AddComponents([]v1alpha2.Component{container})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/addUserCommand.go b/ui/wasm/pkg/exports/addUserCommand.go
deleted file mode 100644
index fa2094248dc..00000000000
--- a/ui/wasm/pkg/exports/addUserCommand.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func AddUserCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		addUserCommand(args[0].String(), args[1].String(), args[2].String()),
-	)
-}
-
-func addUserCommand(component string, name string, commandLine string) (map[string]interface{}, error) {
-	command := v1alpha2.Command{
-		Id: name,
-		CommandUnion: v1alpha2.CommandUnion{
-			Exec: &v1alpha2.ExecCommand{
-				CommandLine: commandLine,
-				Component: component,
-			},
-		},
-	}
-	err := global.Devfile.Data.AddCommands([]v1alpha2.Command{command})
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/arrays.go b/ui/wasm/pkg/exports/arrays.go
deleted file mode 100644
index 891814785a6..00000000000
--- a/ui/wasm/pkg/exports/arrays.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package exports
-
-import "syscall/js"
-
-type userCommand struct {
-	Name             string
-	Group            string
-	Default          bool
-	CommandLine      string
-	HotReloadCapable bool
-	WorkingDir       string
-}
-
-func getStringArray(value js.Value) []string {
-	l := value.Length()
-	result := make([]string, 0, l)
-	for i := 0; i < l; i++ {
-		s := value.Index(i).String()
-		if len(s) > 0 {
-			result = append(result, s)
-		}
-	}
-	return result
-}
-
-func getUserCommandArray(value js.Value) []userCommand {
-	l := value.Length()
-	result := make([]userCommand, 0, l)
-	for i := 0; i < l; i++ {
-		v := value.Index(i)
-		result = append(result, userCommand{
-			Name: v.Get("name").String(),
-			Group: v.Get("group").String(),
-			Default: v.Get("default").Bool(),
-			CommandLine: v.Get("commandLine").String(),
-			HotReloadCapable: v.Get("hotReloadCapable").Bool(),
-			WorkingDir: v.Get("workingDir").String(),
-		})
-	}
-	return result
-}
diff --git a/ui/wasm/pkg/exports/deleteCommand.go b/ui/wasm/pkg/exports/deleteCommand.go
deleted file mode 100644
index 8ad929739fc..00000000000
--- a/ui/wasm/pkg/exports/deleteCommand.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package exports
-
-import (
-	"fmt"
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func DeleteCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		deleteCommand(args[0].String()),
-	)
-}
-
-func deleteCommand(name string) (map[string]interface{}, error) {
-
-	err := checkCommandUsed(name)
-	if err != nil {
-		return nil, fmt.Errorf("error deleting command %q: %w", name, err)
-	}
-	err = global.Devfile.Data.DeleteCommand(name)
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
-
-func checkCommandUsed(name string) error {
-	commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{
-		CommandOptions: common.CommandOptions{
-			CommandType: v1alpha2.CompositeCommandType,
-		},
-	})
-	if err != nil {
-		return err
-	}
-
-	for _, command := range commands {
-		for _, subcommand := range command.Composite.Commands {
-			if subcommand == name {
-				return fmt.Errorf("command %q is used by composite command %q", name, command.Id)
-			}
-		}
-	}
-
-	return nil
-}
diff --git a/ui/wasm/pkg/exports/deleteContainer.go b/ui/wasm/pkg/exports/deleteContainer.go
deleted file mode 100644
index 8a4264c8f71..00000000000
--- a/ui/wasm/pkg/exports/deleteContainer.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package exports
-
-import (
-	"fmt"
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func DeleteContainerWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		deleteContainer(args[0].String()),
-	)
-}
-
-func deleteContainer(name string) (map[string]interface{}, error) {
-
-	err := checkContainerUsed(name)
-	if err != nil {
-		return nil, fmt.Errorf("error deleting container %q: %w", name, err)
-	}
-	err = global.Devfile.Data.DeleteComponent(name)
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
-
-func checkContainerUsed(name string) error {
-	commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{
-		CommandOptions: common.CommandOptions{
-			CommandType: v1alpha2.ExecCommandType,
-		},
-	})
-	if err != nil {
-		return err
-	}
-
-	for _, command := range commands {
-		if command.Exec.Component == name {
-			return fmt.Errorf("container %q is used by exec command %q", name, command.Id)
-		}
-	}
-
-	return nil
-}
diff --git a/ui/wasm/pkg/exports/deleteImage.go b/ui/wasm/pkg/exports/deleteImage.go
deleted file mode 100644
index 56c9b239f3e..00000000000
--- a/ui/wasm/pkg/exports/deleteImage.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package exports
-
-import (
-	"fmt"
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func DeleteImageWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		deleteImage(args[0].String()),
-	)
-}
-
-func deleteImage(name string) (map[string]interface{}, error) {
-
-	err := checkImageUsed(name)
-	if err != nil {
-		return nil, fmt.Errorf("error deleting image %q: %w", name, err)
-	}
-	err = global.Devfile.Data.DeleteComponent(name)
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
-
-func checkImageUsed(name string) error {
-	commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{
-		CommandOptions: common.CommandOptions{
-			CommandType: v1alpha2.ApplyCommandType,
-		},
-	})
-	if err != nil {
-		return err
-	}
-
-	for _, command := range commands {
-		if command.Apply.Component == name {
-			return fmt.Errorf("image %q is used by Image Command %q", name, command.Id)
-		}
-	}
-
-	return nil
-}
diff --git a/ui/wasm/pkg/exports/deleteResource.go b/ui/wasm/pkg/exports/deleteResource.go
deleted file mode 100644
index fdb3118a9e2..00000000000
--- a/ui/wasm/pkg/exports/deleteResource.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package exports
-
-import (
-	"fmt"
-	"syscall/js"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func DeleteResourceWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		deleteResource(args[0].String()),
-	)
-}
-
-func deleteResource(name string) (map[string]interface{}, error) {
-
-	err := checkResourceUsed(name)
-	if err != nil {
-		return nil, fmt.Errorf("error deleting resource %q: %w", name, err)
-	}
-	err = global.Devfile.Data.DeleteComponent(name)
-	if err != nil {
-		return nil, err
-	}
-	return utils.GetContent()
-}
-
-func checkResourceUsed(name string) error {
-	commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{
-		CommandOptions: common.CommandOptions{
-			CommandType: v1alpha2.ApplyCommandType,
-		},
-	})
-	if err != nil {
-		return err
-	}
-
-	for _, command := range commands {
-		if command.Apply.Component == name {
-			return fmt.Errorf("resource %q is used by Apply Command %q", name, command.Id)
-		}
-	}
-
-	return nil
-}
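The four delete files above share a referential-integrity pattern: before removing a command or component, check whether any command of the relevant type still references it and refuse the deletion with a descriptive error if so. A self-contained sketch of that check-before-delete discipline, with simplified stand-in types rather than the devfile library's:

```go
package main

import (
	"errors"
	"fmt"
)

// command is a simplified stand-in for a devfile command that references a
// component by name.
type command struct {
	id        string
	component string
}

// checkUsed mirrors checkContainerUsed/checkImageUsed/checkResourceUsed:
// fail if any command still points at the component.
func checkUsed(name string, commands []command) error {
	for _, c := range commands {
		if c.component == name {
			return fmt.Errorf("component %q is used by command %q", name, c.id)
		}
	}
	return nil
}

func deleteComponent(name string, commands []command, components map[string]struct{}) error {
	if err := checkUsed(name, commands); err != nil {
		return fmt.Errorf("error deleting component %q: %w", name, err)
	}
	if _, ok := components[name]; !ok {
		return errors.New("component not found")
	}
	delete(components, name)
	return nil
}

func main() {
	comps := map[string]struct{}{"runtime": {}}
	cmds := []command{{id: "run", component: "runtime"}}
	fmt.Println(deleteComponent("runtime", cmds, comps)) // refused: still referenced
}
```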
return nil
-}
diff --git a/ui/wasm/pkg/exports/getFlowChart.go b/ui/wasm/pkg/exports/getFlowChart.go
deleted file mode 100644
index 51df2f061dd..00000000000
--- a/ui/wasm/pkg/exports/getFlowChart.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package exports
-
-import (
-	"errors"
-	"syscall/js"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-lifecycle/pkg/graph"
-)
-
-// getFlowChart
-
-func GetFlowChartWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		getFlowChart(),
-	)
-}
-
-func getFlowChart() (string, error) {
-	g, err := graph.Build(global.Devfile.Data)
-	if err != nil {
-		return "", errors.New("error building graph")
-	}
-	return g.ToFlowchart().String(), nil
-}
diff --git a/ui/wasm/pkg/exports/isQuantityValid.go b/ui/wasm/pkg/exports/isQuantityValid.go
deleted file mode 100644
index 914b5acbe1a..00000000000
--- a/ui/wasm/pkg/exports/isQuantityValid.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"k8s.io/apimachinery/pkg/api/resource"
-)
-
-func IsQuantityValidWrapper(this js.Value, args []js.Value) interface{} {
-	return isQuantityValid(args[0].String())
-}
-
-func isQuantityValid(quantity string) bool {
-	_, err := resource.ParseQuantity(quantity)
-	return err == nil
-}
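The deleted isQuantityValid defers entirely to Kubernetes quantity parsing, so the UI accepts exactly the memory/CPU strings a container spec would accept. A small usage sketch of resource.ParseQuantity:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Valid quantities include binary-SI ("512Mi", "1Gi") and plain
	// decimal ("0.5") forms; anything unparsable yields an error.
	for _, s := range []string{"512Mi", "0.5", "1Gi", "abc"} {
		_, err := resource.ParseQuantity(s)
		fmt.Printf("%-6s valid=%v\n", s, err == nil)
	}
}
```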
"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - - "github.com/feloy/devfile-builder/wasm/pkg/global" - "github.com/feloy/devfile-builder/wasm/pkg/utils" -) - -func SetDefaultCommandWrapper(this js.Value, args []js.Value) interface{} { - return result( - setDefaultCommand(args[0].String(), args[1].String()), - ) -} - -func setDefaultCommand(commandName string, group string) (map[string]interface{}, error) { - fmt.Printf("change default for group %q\n", group) - commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{}) - if err != nil { - return nil, err - } - - for _, command := range commands { - if utils.GetGroup(command) == group { - isDefault := command.Id == commandName - fmt.Printf("setting default = %v for command %q\n", isDefault, command.Id) - utils.SetDefault(&command, isDefault) - err = global.Devfile.Data.UpdateCommand(command) - if err != nil { - fmt.Printf("%s\n", err) - return nil, err - } - } - } - return utils.GetContent() -} diff --git a/ui/wasm/pkg/exports/setDevfileContent.go b/ui/wasm/pkg/exports/setDevfileContent.go deleted file mode 100644 index 8fcbe0e59c2..00000000000 --- a/ui/wasm/pkg/exports/setDevfileContent.go +++ /dev/null @@ -1,40 +0,0 @@ -package exports - -import ( - "fmt" - "syscall/js" - - "github.com/devfile/library/v2/pkg/devfile" - "github.com/devfile/library/v2/pkg/devfile/parser" - context "github.com/devfile/library/v2/pkg/devfile/parser/context" - "github.com/devfile/library/v2/pkg/testingutil/filesystem" - - "k8s.io/utils/pointer" - - "github.com/feloy/devfile-builder/wasm/pkg/global" - "github.com/feloy/devfile-builder/wasm/pkg/utils" -) - -// setDevfileContent - -func SetDevfileContentWrapper(this js.Value, args []js.Value) interface{} { - return result( - SetDevfileContent(args[0].String()), - ) -} - -func SetDevfileContent(content string) (map[string]interface{}, error) { - parserArgs := parser.ParserArgs{ - Data: []byte(content), - ConvertKubernetesContentInUri: pointer.Bool(false), - } - var err error - global.Devfile, _, err = devfile.ParseDevfileAndValidate(parserArgs) - if err != nil { - return nil, fmt.Errorf("error parsing devfile YAML: %w", err) - } - global.FS = filesystem.NewFakeFs() - global.Devfile.Ctx = context.FakeContext(global.FS, "/devfile.yaml") - - return utils.GetContent() -} diff --git a/ui/wasm/pkg/exports/setMetadata.go b/ui/wasm/pkg/exports/setMetadata.go deleted file mode 100644 index 5f37eb0777c..00000000000 --- a/ui/wasm/pkg/exports/setMetadata.go +++ /dev/null @@ -1,62 +0,0 @@ -package exports - -import ( - "strings" - "syscall/js" - - apidevfile "github.com/devfile/api/v2/pkg/devfile" - - "github.com/feloy/devfile-builder/wasm/pkg/global" - "github.com/feloy/devfile-builder/wasm/pkg/utils" -) - -// setMetadata - -func SetMetadataWrapper(this js.Value, args []js.Value) interface{} { - return result( - setMetadata(args[0]), - ) -} - -func setMetadata(metadata js.Value) (map[string]interface{}, error) { - global.Devfile.Data.SetMetadata(apidevfile.DevfileMetadata{ - Name: metadata.Get("name").String(), - Version: metadata.Get("version").String(), - DisplayName: metadata.Get("displayName").String(), - Description: metadata.Get("description").String(), - Tags: splitTags(metadata.Get("tags").String()), - Architectures: splitArchitectures(metadata.Get("architectures").String()), - Icon: metadata.Get("icon").String(), - GlobalMemoryLimit: metadata.Get("globalMemoryLimit").String(), - ProjectType: metadata.Get("projectType").String(), - Language: metadata.Get("language").String(), - 
Website: metadata.Get("website").String(), - Provider: metadata.Get("provider").String(), - SupportUrl: metadata.Get("supportUrl").String(), - }) - return utils.GetContent() -} - -func splitArchitectures(architectures string) []apidevfile.Architecture { - if architectures == "" { - return nil - } - parts := strings.Split(architectures, utils.SEPARATOR) - result := make([]apidevfile.Architecture, len(parts)) - for i, arch := range parts { - result[i] = apidevfile.Architecture(strings.Trim(arch, " ")) - } - return result -} - -func splitTags(tags string) []string { - if tags == "" { - return nil - } - parts := strings.Split(tags, utils.SEPARATOR) - result := make([]string, len(parts)) - for i, tag := range parts { - result[i] = strings.Trim(tag, " ") - } - return result -} diff --git a/ui/wasm/pkg/exports/sub/moveCommand.go b/ui/wasm/pkg/exports/sub/moveCommand.go deleted file mode 100644 index db86a2d8613..00000000000 --- a/ui/wasm/pkg/exports/sub/moveCommand.go +++ /dev/null @@ -1,36 +0,0 @@ -package sub - -import ( - "fmt" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - - "github.com/feloy/devfile-builder/wasm/pkg/utils" -) - -func MoveCommand(commands []v1alpha2.Command, previousGroup, newGroup string, previousIndex, newIndex int) (map[string][]v1alpha2.Command, error) { - commandsByGroup := map[string][]v1alpha2.Command{} - - for _, command := range commands { - group := utils.GetGroup(command) - commandsByGroup[group] = append(commandsByGroup[group], command) - } - - if len(commandsByGroup[previousGroup]) < previousIndex { - return nil, fmt.Errorf("unable to find command at index #%d in group %q", previousIndex, previousGroup) - } - - commandToMove := commandsByGroup[previousGroup][previousIndex] - utils.SetGroup(&commandToMove, newGroup) - - commandsByGroup[previousGroup] = append( - commandsByGroup[previousGroup][:previousIndex], - commandsByGroup[previousGroup][previousIndex+1:]..., - ) - - end := append([]v1alpha2.Command{}, commandsByGroup[newGroup][newIndex:]...) - commandsByGroup[newGroup] = append(commandsByGroup[newGroup][:newIndex], commandToMove) - commandsByGroup[newGroup] = append(commandsByGroup[newGroup], end...) 
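The copy in sub.MoveCommand above (`end := append([]v1alpha2.Command{}, ...)`) is the crux of the splice: without it, the two appends on the destination slice would share a backing array, and inserting the moved command would overwrite the tail it is about to re-append. A self-contained []string version of the same insert-at-index idiom:

```go
package main

import "fmt"

// insertAt inserts v at index i, copying the tail first so the later
// appends cannot clobber it through a shared backing array.
func insertAt(s []string, i int, v string) []string {
	tail := append([]string{}, s[i:]...) // defensive copy before splicing
	s = append(s[:i], v)
	return append(s, tail...)
}

func main() {
	groups := []string{"build", "run", "deploy"}
	fmt.Println(insertAt(groups, 1, "test")) // [build test run deploy]
}
```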
diff --git a/ui/wasm/pkg/exports/sub/moveCommand_test.go b/ui/wasm/pkg/exports/sub/moveCommand_test.go
deleted file mode 100644
index 4fba586983d..00000000000
--- a/ui/wasm/pkg/exports/sub/moveCommand_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package sub
-
-import (
-	"testing"
-
-	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-
-	"github.com/google/go-cmp/cmp"
-)
-
-func newCommand(group string, id string) v1alpha2.Command {
-	return v1alpha2.Command{
-		Id: id,
-		CommandUnion: v1alpha2.CommandUnion{
-			Exec: &v1alpha2.ExecCommand{
-				LabeledCommand: v1alpha2.LabeledCommand{
-					BaseCommand: v1alpha2.BaseCommand{
-						Group: &v1alpha2.CommandGroup{
-							Kind: v1alpha2.CommandGroupKind(group),
-						},
-					},
-				},
-			},
-		},
-	}
-}
-
-func Test_moveCommandSub(t *testing.T) {
-	type args struct {
-		commands      []v1alpha2.Command
-		previousGroup string
-		newGroup      string
-		previousIndex int
-		newIndex      int
-	}
-	tests := []struct {
-		name    string
-		args    args
-		want    map[string][]v1alpha2.Command
-		wantErr bool
-	}{
-		{
-			name: "Move from run to test",
-			args: args{
-				commands: []v1alpha2.Command{
-					newCommand("build", "build1"),
-					newCommand("run", "runToTest"),
-					newCommand("", "other1"),
-				},
-				previousGroup: "run",
-				previousIndex: 0,
-				newGroup: "test",
-				newIndex: 0,
-			},
-			want: map[string][]v1alpha2.Command{
-				"build": {
-					newCommand("build", "build1"),
-				},
-				"run": {},
-				"test": {
-					newCommand("test", "runToTest"),
-				},
-				"": {
-					newCommand("", "other1"),
-				},
-			},
-		},
-		{
-			name: "Move from other to build",
-			args: args{
-				commands: []v1alpha2.Command{
-					newCommand("build", "build1"),
-					newCommand("run", "run"),
-					newCommand("other", "otherToBuild"),
-				},
-				previousGroup: "other",
-				previousIndex: 0,
-				newGroup: "build",
-				newIndex: 1,
-			},
-			want: map[string][]v1alpha2.Command{
-				"build": {
-					newCommand("build", "build1"),
-					newCommand("build", "otherToBuild"),
-				},
-				"run": {
-					newCommand("run", "run"),
-				},
-				"other": {},
-			},
-		},
-		// TODO: Add test cases.
-	}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			got, err := MoveCommand(tt.args.commands, tt.args.previousGroup, tt.args.newGroup, tt.args.previousIndex, tt.args.newIndex)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("moveCommand() error = %v, wantErr %v", err, tt.wantErr)
-				return
-			}
-			if diff := cmp.Diff(tt.want, got); diff != "" {
-				t.Errorf("moveCommand() mismatch (-want +got):\n%s", diff)
-			}
-		})
-	}
-}
diff --git a/ui/wasm/pkg/exports/unsetDefaultCommand.go b/ui/wasm/pkg/exports/unsetDefaultCommand.go
deleted file mode 100644
index fad79ff5b8e..00000000000
--- a/ui/wasm/pkg/exports/unsetDefaultCommand.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package exports
-
-import (
-	"fmt"
-	"syscall/js"
-
-	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func UnsetDefaultCommandWrapper(this js.Value, args []js.Value) interface{} {
-	return result(
-		unsetDefaultCommand(args[0].String()),
-	)
-}
-
-func unsetDefaultCommand(commandName string) (map[string]interface{}, error) {
-	commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{})
-	if err != nil {
-		return nil, err
-	}
-
-	for _, command := range commands {
-		if command.Id == commandName {
-			utils.SetDefault(&command, false)
-			err = global.Devfile.Data.UpdateCommand(command)
-			if err != nil {
-				fmt.Printf("%s\n", err)
-				return nil, err
-			}
-			break
-		}
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/exports/updateEvents.go b/ui/wasm/pkg/exports/updateEvents.go
deleted file mode 100644
index 5b22307b30c..00000000000
--- a/ui/wasm/pkg/exports/updateEvents.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package exports
-
-import (
-	"syscall/js"
-
-	"github.com/feloy/devfile-builder/wasm/pkg/global"
-	"github.com/feloy/devfile-builder/wasm/pkg/utils"
-)
-
-func UpdateEventsWrapper(this js.Value, args []js.Value) interface{} {
-	commands := getStringArray(args[1])
-	return result(
-		updateEvents(args[0].String(), commands),
-	)
-}
-
-func updateEvents(event string, commands []string) (map[string]interface{}, error) {
-	switch event {
-	case "postStart":
-		global.Devfile.Data.UpdateEvents(commands, nil, nil, nil)
-	case "postStop":
-		global.Devfile.Data.UpdateEvents(nil, commands, nil, nil)
-	case "preStart":
-		global.Devfile.Data.UpdateEvents(nil, nil, commands, nil)
-	case "preStop":
-		global.Devfile.Data.UpdateEvents(nil, nil, nil, commands)
-	}
-	return utils.GetContent()
-}
diff --git a/ui/wasm/pkg/global/global.go b/ui/wasm/pkg/global/global.go
deleted file mode 100644
index 0fbe9e44650..00000000000
--- a/ui/wasm/pkg/global/global.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package global
-
-import (
-	"github.com/devfile/library/v2/pkg/devfile/parser"
-	"github.com/devfile/library/v2/pkg/testingutil/filesystem"
-)
-
-var Devfile parser.DevfileObj
-var FS filesystem.Filesystem
string -func GetContent() (map[string]interface{}, error) { - err := global.Devfile.WriteYamlDevfile() - if err != nil { - return nil, errors.New("error writing file") - } - result, err := global.FS.ReadFile("/devfile.yaml") - if err != nil { - return nil, errors.New("error reading file") - } - - commands, err := getCommands() - if err != nil { - return nil, errors.New("error getting commands") - } - - containers, err := getContainers() - if err != nil { - return nil, errors.New("error getting containers") - } - - images, err := getImages() - if err != nil { - return nil, errors.New("error getting images") - } - - resources, err := getResources() - if err != nil { - return nil, errors.New("error getting Kubernetes resources") - } - - return map[string]interface{}{ - "content": string(result), - "metadata": getMetadata(), - "commands": commands, - "events": getEvents(), - "containers": containers, - "images": images, - "resources": resources, - }, nil -} - -func getMetadata() map[string]interface{} { - metadata := global.Devfile.Data.GetMetadata() - return map[string]interface{}{ - "name": metadata.Name, - "version": metadata.Version, - "displayName": metadata.DisplayName, - "description": metadata.Description, - "tags": strings.Join(metadata.Tags, SEPARATOR), - "architectures": joinArchitectures(metadata.Architectures), - "icon": metadata.Icon, - "globalMemoryLimit": metadata.GlobalMemoryLimit, - "projectType": metadata.ProjectType, - "language": metadata.Language, - "website": metadata.Website, - "provider": metadata.Provider, - "supportUrl": metadata.SupportUrl, - } -} - -func getCommands() ([]interface{}, error) { - commands, err := global.Devfile.Data.GetCommands(common.DevfileOptions{}) - if err != nil { - return nil, err - } - result := make([]interface{}, 0, len(commands)) - for _, command := range commands { - newCommand := map[string]interface{}{ - "name": command.Id, - "group": GetGroup(command), - "default": GetDefault(command), - } - - if command.Exec != nil { - newCommand["type"] = "exec" - newCommand["exec"] = map[string]interface{}{ - "component": command.Exec.Component, - "commandLine": command.Exec.CommandLine, - "workingDir": command.Exec.WorkingDir, - "hotReloadCapable": pointer.BoolDeref(command.Exec.HotReloadCapable, false), - } - } - - if command.Apply != nil { - components, err := global.Devfile.Data.GetComponents(common.DevfileOptions{ - FilterByName: command.Apply.Component, - }) - if err != nil { - return nil, err - } - if len(components) == 0 { - return nil, fmt.Errorf("component %q not found", command.Apply.Component) - } - component := components[0] - if component.Kubernetes != nil || component.Openshift != nil { - newCommand["type"] = "apply" - newCommand["apply"] = map[string]interface{}{ - "component": command.Apply.Component, - } - } - if component.Image != nil { - newCommand["type"] = "image" - newCommand["image"] = map[string]interface{}{ - "component": command.Apply.Component, - } - } - - } - - if command.Composite != nil { - commands := make([]interface{}, 0, len(command.Composite.Commands)) - for _, cmd := range command.Composite.Commands { - commands = append(commands, cmd) - } - newCommand["type"] = "composite" - newCommand["composite"] = map[string]interface{}{ - "commands": commands, - "parallel": pointer.BoolDeref(command.Composite.Parallel, false), - } - } - result = append(result, newCommand) - } - return result, nil -} - -func getEvents() map[string]interface{} { - events := global.Devfile.Data.GetEvents() - - return map[string]interface{}{ - 
"preStart": StringArrayToInterfaceArray(events.PreStart), - "postStart": StringArrayToInterfaceArray(events.PostStart), - "preStop": StringArrayToInterfaceArray(events.PreStop), - "postStop": StringArrayToInterfaceArray(events.PostStop), - } -} - -func getContainers() ([]interface{}, error) { - containers, err := global.Devfile.Data.GetComponents(common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1alpha2.ContainerComponentType, - }, - }) - if err != nil { - return nil, err - } - result := make([]interface{}, 0, len(containers)) - for _, container := range containers { - commands := make([]interface{}, len(container.ComponentUnion.Container.Command)) - for i, command := range container.ComponentUnion.Container.Command { - commands[i] = command - } - - args := make([]interface{}, len(container.ComponentUnion.Container.Args)) - for i, arg := range container.ComponentUnion.Container.Args { - args[i] = arg - } - - result = append(result, map[string]interface{}{ - "name": container.Name, - "image": container.ComponentUnion.Container.Image, - "command": commands, - "args": args, - "memoryRequest": container.ComponentUnion.Container.MemoryRequest, - "memoryLimit": container.ComponentUnion.Container.MemoryLimit, - "cpuRequest": container.ComponentUnion.Container.CpuRequest, - "cpuLimit": container.ComponentUnion.Container.CpuLimit, - }) - } - return result, nil -} - -func getImages() ([]interface{}, error) { - images, err := global.Devfile.Data.GetComponents(common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1alpha2.ImageComponentType, - }, - }) - if err != nil { - return nil, err - } - result := make([]interface{}, 0, len(images)) - for _, image := range images { - - args := make([]interface{}, len(image.Image.Dockerfile.Args)) - for i, arg := range image.Image.Dockerfile.Args { - args[i] = arg - } - - result = append(result, map[string]interface{}{ - "name": image.Name, - "imageName": image.Image.ImageName, - "args": args, - "buildContext": image.Image.Dockerfile.BuildContext, - "rootRequired": pointer.BoolDeref(image.Image.Dockerfile.RootRequired, false), - "uri": image.Image.Dockerfile.Uri, - }) - } - return result, nil -} - -func getResources() ([]interface{}, error) { - resources, err := global.Devfile.Data.GetComponents(common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1alpha2.KubernetesComponentType, - }, - }) - if err != nil { - return nil, err - } - result := make([]interface{}, 0, len(resources)) - for _, resource := range resources { - result = append(result, map[string]interface{}{ - "name": resource.Name, - "inlined": resource.ComponentUnion.Kubernetes.Inlined, - "uri": resource.ComponentUnion.Kubernetes.Uri, - }) - } - return result, nil -} - -func joinArchitectures(architectures []apidevfile.Architecture) string { - strArchs := make([]string, len(architectures)) - for i, arch := range architectures { - strArchs[i] = string(arch) - } - return strings.Join(strArchs, SEPARATOR) -} diff --git a/ui/wasm/pkg/utils/convert.go b/ui/wasm/pkg/utils/convert.go deleted file mode 100644 index c55bf11e7fb..00000000000 --- a/ui/wasm/pkg/utils/convert.go +++ /dev/null @@ -1,9 +0,0 @@ -package utils - -func StringArrayToInterfaceArray(strings []string) []interface{} { - result := make([]interface{}, len(strings)) - for i, str := range strings { - result[i] = str - } - return result -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/LICENSE 
b/ui/wasm/vendor/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1d8..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/README.md b/ui/wasm/vendor/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index 261c041e7ab..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. 
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/constants.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33bc9..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - 
FILL_CHARACTER = ' ' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/context.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c03..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index bcbe00d0c5e..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 7ed5e01c342..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - csiState.parser.logf("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go 
b/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 index 1c719db9e48..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_state.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 6390abd231b..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - escState.parser.logf("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/event_handler.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c20..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position 
(depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/ground_state.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e94693..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 593b10ab696..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - oscState.parser.logf("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 03cec7ada62..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -package ansiterm - -import ( - "errors" - "log" - "os" -) - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state - - logf func(string, ...interface{}) -} - -type Option func(*AnsiParser) - -func WithLogf(f func(string, ...interface{})) Option { - return func(ap *AnsiParser) { - ap.logf = f - } -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { - ap := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - for _, o := range opts { - o(ap) - } - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ := os.Create("ansiParser.log") - logger := 
log.New(logFile, "", log.LstdFlags) - if ap.logf != nil { - l := ap.logf - ap.logf = func(s string, v ...interface{}) { - l(s, v...) - logger.Printf(s, v...) - } - } else { - ap.logf = logger.Printf - } - } - - if ap.logf == nil { - ap.logf = func(string, ...interface{}) {} - } - - ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} - ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} - ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} - ap.escape = escapeState{baseState{name: "Escape", parser: ap}} - ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} - ap.error = errorState{baseState{name: "Error", parser: ap}} - ap.ground = groundState{baseState{name: "Ground", parser: ap}} - ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} - - ap.stateMap = []state{ - ap.csiEntry, - ap.csiParam, - ap.dcsEntry, - ap.escape, - ap.escapeIntermediate, - ap.error, - ap.ground, - ap.oscString, - } - - ap.currState = getState(initialState, ap.stateMap) - - ap.logf("CreateParser: parser %p", ap) - return ap -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - ap.logf("WARNING: newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index de0a1f9cde3..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,99 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return 
string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 0bb5e51e9aa..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,119 +0,0 @@ -package ansiterm - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - ap.logf("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - ap.logf("collectInter %#x", currChar) - ap.context.paramBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) - ap.logf("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - ap.logf("Parsed params: %v with length: %d", params, len(params)) - - ap.logf("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return 
ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params) - case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/states.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd12d..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/utilities.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493a2..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index a6732797263..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err 
:= strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 6055e33b912..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,327 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. 
-// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - ENABLE_AUTO_POSITION = 0x0100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 - DISABLE_NEWLINE_AUTO_RETURN = 0x0008 - ENABLE_LVB_GRID_WORLDWIDE = 0x0010 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728f4..00000000000 --- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
-	switch ansiMode {
-
-	// Mode styles
-	case ansiterm.ANSI_SGR_BOLD:
-		windowsMode = windowsMode | FOREGROUND_INTENSITY
-
-	case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
-		windowsMode &^= FOREGROUND_INTENSITY
-
-	case ansiterm.ANSI_SGR_UNDERLINE:
-		windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
-
-	case ansiterm.ANSI_SGR_REVERSE:
-		inverted = true
-
-	case ansiterm.ANSI_SGR_REVERSE_OFF:
-		inverted = false
-
-	case ansiterm.ANSI_SGR_UNDERLINE_OFF:
-		windowsMode &^= COMMON_LVB_UNDERSCORE
-
-	// Foreground colors
-	case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
-		windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
-
-	case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
-
-	case ansiterm.ANSI_SGR_FOREGROUND_RED:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
-
-	case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
-
-	case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
-
-	case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
-		windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
-
-	// Background colors
-	case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
-		// Black with no intensity
-		windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
-
-	case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
-
-	case ansiterm.ANSI_SGR_BACKGROUND_RED:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
-
-	case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
-
-	case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
-
-	case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
-
-	case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
-		windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
-	}
-
-	return windowsMode, inverted
-}
-
-// invertAttributes inverts the foreground and background colors of a Windows attributes value
-func invertAttributes(windowsMode uint16) uint16 {
-	return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
-}
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
deleted file mode 100644
index 3ee06ea7282..00000000000
--- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build windows
-
-package winterm
-
-const (
-	horizontal = iota
-	vertical
-)
-
-func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
-	if h.originMode {
-		sr := h.effectiveSr(info.Window)
-		return SMALL_RECT{
-			Top:    sr.top,
-			Bottom: sr.bottom,
-			Left:   0,
-			Right:  info.Size.X - 1,
-		}
-	} else {
-		return SMALL_RECT{
-			Top:    info.Window.Top,
-			Bottom: info.Window.Bottom,
-			Left:   0,
-			Right:  info.Size.X - 1,
-		}
-	}
-}
-
-// setCursorPosition sets the cursor to the specified position, bounded to the screen size
-func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
-	position.X = ensureInRange(position.X, window.Left, window.Right)
-	position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
-	err := SetConsoleCursorPosition(h.fd, position)
-	if err != nil {
-		return err
-	}
-	h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
-	return err
-}
-
-func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
-	return h.moveCursor(vertical, param)
-}
-
-func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
-	return h.moveCursor(horizontal, param)
-}
-
-func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-
-	position := info.CursorPosition
-	switch moveMode {
-	case horizontal:
-		position.X += int16(param)
-	case vertical:
-		position.Y += int16(param)
-	}
-
-	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-
-	position := info.CursorPosition
-	position.X = 0
-	position.Y += int16(param)
-
-	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-
-	position := info.CursorPosition
-	position.X = int16(param) - 1
-
-	if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
deleted file mode 100644
index 244b5fa25ef..00000000000
--- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// +build windows
-
-package winterm
-
-import "github.com/Azure/go-ansiterm"
-
-func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
-	// Ignore an invalid (negative area) request
-	if toCoord.Y < fromCoord.Y {
-		return nil
-	}
-
-	var err error
-
-	var coordStart = COORD{}
-	var coordEnd = COORD{}
-
-	xCurrent, yCurrent := fromCoord.X, fromCoord.Y
-	xEnd, yEnd := toCoord.X, toCoord.Y
-
-	// Clear any partial initial line
-	if xCurrent > 0 {
-		coordStart.X, coordStart.Y = xCurrent, yCurrent
-		coordEnd.X, coordEnd.Y = xEnd, yCurrent
-
-		err = h.clearRect(attributes, coordStart, coordEnd)
-		if err != nil {
-			return err
-		}
-
-		xCurrent = 0
-		yCurrent += 1
-	}
-
-	// Clear intervening rectangular section
-	if yCurrent < yEnd {
-		coordStart.X, coordStart.Y = xCurrent, yCurrent
-		coordEnd.X, coordEnd.Y = xEnd, yEnd-1
-
-		err = h.clearRect(attributes, coordStart, coordEnd)
-		if err != nil {
-			return err
-		}
-
-		xCurrent = 0
-		yCurrent = yEnd
-	}
-
-	// Clear remaining partial ending line
-	coordStart.X, coordStart.Y = xCurrent, yCurrent
-	coordEnd.X, coordEnd.Y = xEnd, yEnd
-
-	err = h.clearRect(attributes, coordStart, coordEnd)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
-	region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
-	width := toCoord.X - fromCoord.X + 1
-	height := toCoord.Y - fromCoord.Y + 1
-	size := uint32(width) * uint32(height)
-
-	if size <= 0 {
-		return nil
-	}
-
-	buffer := make([]CHAR_INFO, size)
-
-	char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
-	for i := 0; i < int(size); i++ {
-		buffer[i] = char
-	}
-
-	err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
deleted file mode 100644
index 2d27fa1d028..00000000000
--- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build windows
-
-package winterm
-
-// effectiveSr gets the current effective scroll region in buffer coordinates
-func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
-	top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
-	bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
-	if top >= bottom {
-		top = window.Top
-		bottom = window.Bottom
-	}
-	return scrollRegion{top: top, bottom: bottom}
-}
-
-func (h *windowsAnsiEventHandler) scrollUp(param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-
-	sr := h.effectiveSr(info.Window)
-	return h.scroll(param, sr, info)
-}
-
-func (h *windowsAnsiEventHandler) scrollDown(param int) error {
-	return h.scrollUp(-param)
-}
-
-func (h *windowsAnsiEventHandler) deleteLines(param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-
-	start := info.CursorPosition.Y
-	sr := h.effectiveSr(info.Window)
-	// Lines cannot be inserted or deleted outside the scrolling region.
-	if start >= sr.top && start <= sr.bottom {
-		sr.top = start
-		return h.scroll(param, sr, info)
-	} else {
-		return nil
-	}
-}
-
-func (h *windowsAnsiEventHandler) insertLines(param int) error {
-	return h.deleteLines(-param)
-}
-
-// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
-	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
-	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
-
-	// Copy from and clip to the scroll region (full buffer width)
-	scrollRect := SMALL_RECT{
-		Top:    sr.top,
-		Bottom: sr.bottom,
-		Left:   0,
-		Right:  info.Size.X - 1,
-	}
-
-	// Origin to which area should be copied
-	destOrigin := COORD{
-		X: 0,
-		Y: sr.top - int16(param),
-	}
-
-	char := CHAR_INFO{
-		UnicodeChar: ' ',
-		Attributes:  h.attributes,
-	}
-
-	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
-	info, err := GetConsoleScreenBufferInfo(h.fd)
-	if err != nil {
-		return err
-	}
-	return h.scrollLine(param, info.CursorPosition, info)
-}
-
-func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
-	return h.deleteCharacters(-param)
-}
-
-// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
-func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
-	// Copy from and clip to the scroll region (full buffer width)
-	scrollRect := SMALL_RECT{
-		Top:    position.Y,
-		Bottom: position.Y,
-		Left:   position.X,
-		Right:  info.Size.X - 1,
-	}
-
-	// Origin to which area should be copied
-	destOrigin := COORD{
-		X: position.X - int16(columns),
-		Y: position.Y,
-	}
-
-	char := CHAR_INFO{
-		UnicodeChar: ' ',
-		Attributes:  h.attributes,
-	}
-
-	if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
deleted file mode 100644
index afa7635d77b..00000000000
--- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package winterm
-
-// AddInRange increments a value by the passed quantity while ensuring the values
-// always remain within the supplied min / max range.
-func addInRange(n int16, increment int16, min int16, max int16) int16 {
-	return ensureInRange(n+increment, min, max)
-}
diff --git a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
deleted file mode 100644
index 2d40fb75ad0..00000000000
--- a/ui/wasm/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
+++ /dev/null
@@ -1,743 +0,0 @@
-// +build windows
-
-package winterm
-
-import (
-	"bytes"
-	"log"
-	"os"
-	"strconv"
-
-	"github.com/Azure/go-ansiterm"
-)
-
-type windowsAnsiEventHandler struct {
-	fd             uintptr
-	file           *os.File
-	infoReset      *CONSOLE_SCREEN_BUFFER_INFO
-	sr             scrollRegion
-	buffer         bytes.Buffer
-	attributes     uint16
-	inverted       bool
-	wrapNext       bool
-	drewMarginByte bool
-	originMode     bool
-	marginByte     byte
-	curInfo        *CONSOLE_SCREEN_BUFFER_INFO
-	curPos         COORD
-	logf           func(string, ...interface{})
-}
-
-type Option func(*windowsAnsiEventHandler)
-
-func WithLogf(f func(string, ...interface{})) Option {
-	return func(w *windowsAnsiEventHandler) {
-		w.logf = f
-	}
-}
-
-func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
-	infoReset, err := GetConsoleScreenBufferInfo(fd)
-	if err != nil {
-		return nil
-	}
-
-	h := &windowsAnsiEventHandler{
-		fd:         fd,
-		file:       file,
-		infoReset:  infoReset,
-		attributes: infoReset.Attributes,
-	}
-	for _, o := range opts {
-		o(h)
-	}
-
-	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
-		logFile, _ := os.Create("winEventHandler.log")
-		logger := log.New(logFile, "", log.LstdFlags)
-		if h.logf != nil {
-			l := h.logf
-			h.logf = func(s string, v ...interface{}) {
-				l(s, v...)
-				logger.Printf(s, v...)
-			}
-		} else {
-			h.logf = logger.Printf
-		}
-	}
-
-	if h.logf == nil {
-		h.logf = func(string, ...interface{}) {}
-	}
-
-	return h
-}
-
-type scrollRegion struct {
-	top    int16
-	bottom int16
-}
-
-// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
-// current cursor position and scroll region settings, in which case it returns
-// true. If no special handling is necessary, then it does nothing and returns
-// false.
-//
-// In the false case, the caller should ensure that a carriage return
-// and line feed are inserted or that the text is otherwise wrapped.
-func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
-	if h.wrapNext {
-		if err := h.Flush(); err != nil {
-			return false, err
-		}
-		h.clearWrap()
-	}
-	pos, info, err := h.getCurrentInfo()
-	if err != nil {
-		return false, err
-	}
-	sr := h.effectiveSr(info.Window)
-	if pos.Y == sr.bottom {
-		// Scrolling is necessary. Let Windows automatically scroll if the scrolling region
-		// is the full window.
-		if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
-			if includeCR {
-				pos.X = 0
-				h.updatePos(pos)
-			}
-			return false, nil
-		}
-
-		// A custom scroll region is active. Scroll the window manually to simulate
-		// the LF.
-		if err := h.Flush(); err != nil {
-			return false, err
-		}
-		h.logf("Simulating LF inside scroll region")
-		if err := h.scrollUp(1); err != nil {
-			return false, err
-		}
-		if includeCR {
-			pos.X = 0
-			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
-				return false, err
-			}
-		}
-		return true, nil
-
-	} else if pos.Y < info.Window.Bottom {
-		// Let Windows handle the LF.
-		pos.Y++
-		if includeCR {
-			pos.X = 0
-		}
-		h.updatePos(pos)
-		return false, nil
-	} else {
-		// The cursor is at the bottom of the screen but outside the scroll
-		// region.
Skip the LF. - h.logf("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. -func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - h.logf("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - h.logf("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - h.logf("set window failed: %v", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - h.logf("set buffer failed: %v", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - h.logf("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - h.logf("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - h.logf("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - h.logf("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - h.logf("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - h.logf("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - h.logf("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/LICENSE b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/LICENSE deleted file mode 100644 index f288702d2fa..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/LICENSE +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. 
- - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. 
This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. 
This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. 
- - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. 
- - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. 
- - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. 
If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. 
Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Edge.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Edge.go
deleted file mode 100644
index b1e7ded0f56..00000000000
--- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Edge.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package flowchart
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-type edgeShape string
-
-// Shape definitions for Edges as described at
-// https://mermaidjs.github.io/flowchart.html#links-between-nodes.
-// When added to a Flowchart, Edges get the EShapeArrow shape as the default.
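For reference, a minimal sketch of how the edge shape constants above were used through the Edge API in this deleted file; the node IDs and edge text are illustrative, not taken from the patch:

package main

import (
	"fmt"

	"github.com/Heiko-san/mermaidgen/flowchart"
)

func main() {
	chart := flowchart.NewFlowchart()
	build := chart.AddNode("build")
	deploy := chart.AddNode("deploy")

	edge := chart.AddEdge(build, deploy) // defaults to EShapeArrow ("-->")
	edge.Shape = flowchart.EShapeDottedArrow
	edge.AddLines("on success") // rendered along the edge

	// Emits mermaid code such as: build -.->|"on success"| deploy
	fmt.Print(chart)
}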
-const ( - EShapeArrow edgeShape = `-->` - EShapeDottedArrow edgeShape = `-.->` - EShapeThickArrow edgeShape = `==>` - EShapeLine edgeShape = `---` - EShapeDottedLine edgeShape = `-.-` - EShapeThickLine edgeShape = `===` -) - -// Edge represents a connection between 2 Nodes. -// Create an instance of Edge via Flowchart's AddEdge method, do not create -// instances directly. Already defined IDs (indices) can be looked up via -// Flowchart's GetEdge method or iterated over via its ListEdges method. -type Edge struct { - id int - From *Node // Pointer to the Node where the Edge starts. - To *Node // Pointer to the Node where the Edge ends. - Shape edgeShape // The shape of this Edge. - Text []string // Optional text lines to be added along the Edge. - Style *EdgeStyle // Optional CSS style. -} - -// ID provides access to the Edge's readonly field id. -// Since Edges don't really have IDs, this actually is the index when the Edge -// was added, which is used for linkStyle lines. -func (e *Edge) ID() (id int) { - return e.id -} - -// String renders this graph element to an edge definition line. -// If Style member is set an additional linkStyle line will be created. -func (e *Edge) String() (renderedElement string) { - line := string(e.Shape) - if len(e.Text) > 0 { - line += fmt.Sprintf(`|"%s"|`, strings.Join(e.Text, "
")) - } - text := fmt.Sprintf("%s %s %s\n", e.From.id, line, e.To.id) - if e.Style != nil { - text += fmt.Sprintf(e.Style.String(), strconv.Itoa(e.id)) - } - return text -} - -// AddLines adds one or more lines of text to the Text member. -// This text gets rendered along the Edge, separated by
's. -func (e *Edge) AddLines(lines ...string) { - e.Text = append(e.Text, lines...) -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/EdgeStyle.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/EdgeStyle.go deleted file mode 100644 index 998f25f6f3d..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/EdgeStyle.go +++ /dev/null @@ -1,69 +0,0 @@ -package flowchart - -import ( - "fmt" - "strings" -) - -type edgeInterpolation string - -// Interpolation definitions for Edges as described at -// https://github.com/knsv/mermaid/issues/580#issuecomment-373929046. -// The default behaviour if no interpolation is given at all is -// InterpolationLinear. -const ( - InterpolationBasis edgeInterpolation = `basis` - InterpolationLinear edgeInterpolation = `linear` -) - -// An EdgeStyle is used to add CSS to an Edge. It renders to a linkStyle line -// for each Edge it is associated with. Note that linkStyles will override any -// effect from the Edge's shape defintion. -// Retrieve an instance of EdgeStyle via Flowchart's EdgeStyle method, do not -// create instances directly. -type EdgeStyle struct { - id string // virtual ID for lookup - Stroke htmlColor // Renders to stroke:#333 - StrokeWidth uint8 // Renders to stroke-width:2px - StrokeDash uint8 // Renders to stroke-dasharray:5px - More string // More styles, e.g.: stroke:#333,stroke-width:1px - Interpolation edgeInterpolation // Edge curve definition -} - -// ID provides access to the EdgeStyle's readonly field id. -func (es *EdgeStyle) ID() (id string) { - return es.id -} - -// String renders this graph element to a linkStyle line. -func (es *EdgeStyle) String() (renderedElement string) { - interpolation := "" - if es.Interpolation != "" { - interpolation = "interpolate " + string(es.Interpolation) - } - styles := []string{} - if es.Stroke != "" { - styles = append(styles, "stroke:"+string(es.Stroke)) - } - if es.StrokeWidth != 1 { - styles = append(styles, fmt.Sprintf(`stroke-width:%dpx`, - es.StrokeWidth)) - } - if es.StrokeDash != 0 { - styles = append(styles, fmt.Sprintf(`stroke-dasharray:%dpx`, - es.StrokeDash)) - } - if es.More != "" { - styles = append(styles, es.More) - } - definitions := strings.Join(styles, ",") - if definitions == "" && interpolation == "" { - // neutral element as a fallback to ensure empty linkStyles don't break - // the mermaid syntax - definitions = fmt.Sprintf(`stroke-width:%dpx`, es.StrokeWidth) - } - if definitions != "" && interpolation != "" { - interpolation += " " - } - return `linkStyle %s ` + interpolation + definitions + "\n" -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Flowchart.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Flowchart.go deleted file mode 100644 index 363df87ce04..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Flowchart.go +++ /dev/null @@ -1,242 +0,0 @@ -package flowchart - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "os/exec" - "runtime" -) - -////////// ChartDirection ////////////////////////////////////////////////////// - -type chartDirection string - -// Direction definitions for Flowcharts as described at -// https://mermaidjs.github.io/flowchart.html#graph. -// New Flowcharts get DirectionTopDown as the default. 
-const ( - DirectionTopDown chartDirection = `TB` - DirectionBottomUp chartDirection = `BT` - DirectionRightLeft chartDirection = `RL` - DirectionLeftRight chartDirection = `LR` -) - -////////// GraphItem /////////////////////////////////////////////////////////// - -// interface to define what can be an "item" to a Flowchart/Subgraph -type graphItem interface { - renderGraph() string -} - -////////// Flowchart /////////////////////////////////////////////////////////// - -// Flowchart objects are the entrypoints to this package, the whole graph is -// constructed around a Flowchart object. Create an instance of Flowchart via -// Flowchart's constructor NewFlowchart, do not create instances directly. -type Flowchart struct { - nodeStyles map[string]*NodeStyle // internal storage for NodeStyles - edgeStyles map[string]*EdgeStyle // internal storage for EdgeStyles - subgraphs map[string]*Subgraph // internal storage for Subgraphs - nodes map[string]*Node // internal storage for Nodes - edges []*Edge // internal storage for Edges - items []graphItem // sub-items to render - Direction chartDirection // The direction used to render the graph. - DefaultEdgeStyle *EdgeStyle // Define a default linkStyle element. -} - -// NewFlowchart is the constructor used to create a new Flowchart object. -// This object is the entrypoint for any further interactions with your graph. -// Always use the constructor, don't create Flowchart objects directly. -func NewFlowchart() (newFlowchart *Flowchart) { - f := &Flowchart{} - f.Direction = DirectionTopDown - f.nodeStyles = make(map[string]*NodeStyle) - f.edgeStyles = make(map[string]*EdgeStyle) - f.subgraphs = make(map[string]*Subgraph) - f.nodes = make(map[string]*Node) - return f -} - -// String recursively renders the whole graph to mermaid code lines. -func (fc *Flowchart) String() (renderedElement string) { - text := fmt.Sprintf("graph %s\n", fc.Direction) - if fc.DefaultEdgeStyle != nil { - text += fmt.Sprintf(fc.DefaultEdgeStyle.String(), "default") - } - for _, s := range fc.nodeStyles { - text += s.String() - } - for _, item := range fc.items { - text += item.renderGraph() - } - for _, e := range fc.edges { - text += e.String() - } - return text -} - -// Structs for JSON encode -type mermaidJSON struct { - Theme string `json:"theme"` -} -type dataJSON struct { - Code string `json:"code"` - Mermaid mermaidJSON `json:"mermaid"` -} - -// LiveURL renders the Flowchart and generates a view URL for -// https://mermaidjs.github.io/mermaid-live-editor from it. -func (fc *Flowchart) LiveURL() (url string) { - liveURL := `https://mermaidjs.github.io/mermaid-live-editor/#/view/` - data, _ := json.Marshal(dataJSON{ - Code: fc.String(), Mermaid: mermaidJSON{Theme: "default"}}) - return liveURL + base64.URLEncoding.EncodeToString(data) -} - -// ViewInBrowser uses the URL generated by Flowchart's LiveURL method and opens -// that URL in the OS's default browser. It starts the browser command -// non-blocking and eventually returns any error occured. 
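A hedged sketch of the live-editor round trip offered by LiveURL and ViewInBrowser above; the chart content is arbitrary:

package main

import (
	"fmt"
	"log"

	"github.com/Heiko-san/mermaidgen/flowchart"
)

func main() {
	chart := flowchart.NewFlowchart()
	chart.Direction = flowchart.DirectionLeftRight
	a := chart.AddNode("a")
	b := chart.AddNode("b")
	chart.AddEdge(a, b)

	// Shareable mermaid-live-editor URL built from the rendered chart.
	fmt.Println(chart.LiveURL())

	// Best-effort: opens that URL in the default browser, non-blocking.
	if err := chart.ViewInBrowser(); err != nil {
		log.Println(err) // e.g. "unsupported platform"
	}
}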
-func (fc *Flowchart) ViewInBrowser() (err error) { - switch runtime.GOOS { - case "openbsd", "linux": - return exec.Command("xdg-open", fc.LiveURL()).Start() - case "darwin": - return exec.Command("open", fc.LiveURL()).Start() - case "windows": - return exec.Command("rundll32", "url.dll,FileProtocolHandler", - fc.LiveURL()).Start() - default: - return fmt.Errorf("unsupported platform") - } -} - -////////// add & get Styles //////////////////////////////////////////////////// - -// NodeStyle is used to create new or lookup existing NodeStyles by ID. -// The returned object pointers can be assigned to any number of Nodes -// to style them using CSS. -func (fc *Flowchart) NodeStyle(id string) (style *NodeStyle) { - s, found := fc.nodeStyles[id] - if !found { - s = &NodeStyle{id: id, StrokeWidth: 1} - fc.nodeStyles[id] = s - } - return s -} - -// EdgeStyle is used to create new or lookup existing EdgeStyles by ID. -// The returned object pointers can be assigned to any number of Edges -// to style them using CSS. Note that EdgeStyles override the shape of an Edge, -// e.g. if you color an Edge that uses EShapeDottedArrow it looses its dotted -// nature unless you define a dotted line using the EdgeStyle. -func (fc *Flowchart) EdgeStyle(id string) (style *EdgeStyle) { - s, found := fc.edgeStyles[id] - if !found { - s = &EdgeStyle{id: id, StrokeWidth: 1} - fc.edgeStyles[id] = s - } - return s -} - -////////// add Items /////////////////////////////////////////////////////////// - -// AddSubgraph is used to add a nested Subgraph to the Flowchart. -// If the provided ID already exists, no new Subgraph is created and nil is -// returned. The ID can later be used to lookup the created Subgraph using -// Flowchart's GetSubgraph method. If you want to add a Subgraph to a Subgraph, -// use that Subgraph's AddSubgraph method. -func (fc *Flowchart) AddSubgraph(id string) (newSubgraph *Subgraph) { - _, alreadyExists := fc.subgraphs[id] - if alreadyExists { - return nil - } - s := &Subgraph{id: id, flowchart: fc} - fc.subgraphs[id] = s - fc.items = append(fc.items, s) - return s -} - -// AddNode is used to add a new Node to the Flowchart. If the provided ID -// already exists, no new Node is created and nil is returned. The ID can later -// be used to lookup the created Node using Flowchart's GetNode method. -// If you want to add a Node to a Subgraph, use that Subgraph's AddNode method. -func (fc *Flowchart) AddNode(id string) (newNode *Node) { - _, alreadyExists := fc.nodes[id] - if alreadyExists { - return nil - } - n := &Node{id: id, Shape: NShapeRect} - fc.nodes[id] = n - fc.items = append(fc.items, n) - return n -} - -// AddEdge is used to add a new Edge to the Flowchart. Since Edges have no IDs -// this will always succeed. The (pseudo) ID is the index that defines the order -// of all Edges and is used to define linkStyles. The ID can later be used to -// lookup the created Edge using Flowchart's GetEdge method. -func (fc *Flowchart) AddEdge(from *Node, to *Node) (newEdge *Edge) { - e := &Edge{From: from, To: to, Shape: EShapeArrow} - fc.edges = append(fc.edges, e) - e.id = len(fc.edges) - 1 - return e -} - -////////// get Items /////////////////////////////////////////////////////////// - -// GetSubgraph looks up a previously defined Subgraph by its ID. -// If this ID doesn't exist, nil is returned. -// Use Flowchart's or Subgraph's AddSubgraph to create new Subgraphs. 
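As the comments above note, the Add*/Get* methods signal duplicate IDs and misses with nil rather than errors; a brief sketch of that contract:

package main

import (
	"fmt"

	"github.com/Heiko-san/mermaidgen/flowchart"
)

func main() {
	chart := flowchart.NewFlowchart()

	if sg := chart.AddSubgraph("backend"); sg != nil {
		sg.AddNode("db") // nil would have meant the ID was already taken
	}

	fmt.Println(chart.GetSubgraph("backend") != nil) // true: known ID
	fmt.Println(chart.GetNode("missing") == nil)     // true: unknown ID
	fmt.Println(chart.GetEdge(0) == nil)             // true: no edges added yet
}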
-func (fc *Flowchart) GetSubgraph(id string) (existingSubgraph *Subgraph) { - // if not found -> nil - return fc.subgraphs[id] -} - -// GetNode looks up a previously defined Node by its ID. -// If this ID doesn't exist, nil is returned. -// Use Flowchart's or Subgraph's AddNode to create new Nodes. -func (fc *Flowchart) GetNode(id string) (existingNode *Node) { - // if not found -> nil - return fc.nodes[id] -} - -// GetEdge looks up a previously defined Edge by its ID (index). -// If this index doesn't exist, nil is returned. -// Use Flowchart's AddEdge to create new Edges. -func (fc *Flowchart) GetEdge(index int) (existingEdge *Edge) { - if index < 0 || len(fc.edges) <= index { - return nil - } - return fc.edges[index] -} - -////////// list Items ////////////////////////////////////////////////////////// - -// ListSubgraphs returns a slice of all previously defined Subgraphs. -// The order is not well-defined. -func (fc *Flowchart) ListSubgraphs() (allSubgraphs []*Subgraph) { - values := make([]*Subgraph, 0, len(fc.subgraphs)) - for _, v := range fc.subgraphs { - values = append(values, v) - } - return values -} - -// ListNodes returns a slice of all previously defined Nodes. -// The order is not well-defined. -func (fc *Flowchart) ListNodes() (allNodes []*Node) { - values := make([]*Node, 0, len(fc.nodes)) - for _, v := range fc.nodes { - values = append(values, v) - } - return values -} - -// ListEdges returns a slice of all previously defined Edges in the order they -// were added. -func (fc *Flowchart) ListEdges() (allEdges []*Edge) { - e := make([]*Edge, len(fc.edges)) - copy(e, fc.edges) - return e -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/HtmlColor.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/HtmlColor.go deleted file mode 100644 index 3f613d2cf5c..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/HtmlColor.go +++ /dev/null @@ -1,15 +0,0 @@ -package flowchart - -type htmlColor string - -// Color definitions for use with NodeStyles and EdgeStyles. -const ( - ColorBlack htmlColor = `#000` - ColorBlue htmlColor = `#00f` - ColorGreen htmlColor = `#0f0` - ColorCyan htmlColor = `#0ff` - ColorRed htmlColor = `#f00` - ColorMagenta htmlColor = `#f0f` - ColorYellow htmlColor = `#ff0` - ColorWhite htmlColor = `#fff` -) diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Node.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Node.go deleted file mode 100644 index 9f816694218..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Node.go +++ /dev/null @@ -1,73 +0,0 @@ -package flowchart - -import ( - "fmt" - "strings" -) - -type nodeShape string - -// Shape definitions for Nodes as described at -// https://mermaidjs.github.io/flowchart.html#nodes--shapes. -// When added to a Flowchart or Subgraph, Nodes get the NShapeRect shape as the -// default. -const ( - NShapeRect nodeShape = `["%s"]` - NShapeRoundRect nodeShape = `("%s")` - NShapeCircle nodeShape = `(("%s"))` - NShapeRhombus nodeShape = `{"%s"}` - NShapeFlagLeft nodeShape = `>"%s"]` -) - -// Node represents a single, unique node of the Flowchart graph. -// Create an instance of Node via Flowchart's or Subgraph's AddNode method, do -// not create instances directly. Already defined IDs can be looked up via -// Flowchart's GetNode method or iterated over via its ListNodes method. -type Node struct { - id string - Shape nodeShape // The shape of this Node. - Text []string // The body text, ID if no text is added. 
- Link string // Optional URL for a click-hook. - LinkText string // Optional tooltip for the link. - Style *NodeStyle // Optional CSS style. -} - -// ID provides access to the Node's readonly field id. -func (n *Node) ID() (id string) { - return n.id -} - -// Implements graphItem, see String() for further details. -func (n *Node) renderGraph() string { - textbox := n.id - if len(n.Text) > 0 { - textbox = strings.Join(n.Text, "
") - } - text := n.id + fmt.Sprintf(string(n.Shape), textbox) + "\n" - if n.Style != nil { - text += fmt.Sprintf("class %s %s\n", n.id, n.Style.id) - } - if n.Link != "" { - linktxt := n.Link - if n.LinkText != "" { - linktxt = n.LinkText - } - text += fmt.Sprintf("click %s \"%s\" \"%s\"\n", - n.id, n.Link, linktxt) - } - return text -} - -// String renders this graph element to a node definition line. -// If Style member is set an additional class line will be created. -// If Link member is set an additional click line will be created. -func (n *Node) String() (renderedElement string) { - return n.renderGraph() -} - -// AddLines adds one or more lines of text to the Text member. -// This text gets rendered to the Node's body, separated by
's. -// If no text is added, the Node's ID is rendered to its body. -func (n *Node) AddLines(lines ...string) { - n.Text = append(n.Text, lines...) -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/NodeStyle.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/NodeStyle.go deleted file mode 100644 index 9775309a08d..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/NodeStyle.go +++ /dev/null @@ -1,52 +0,0 @@ -package flowchart - -import ( - "fmt" - "strings" -) - -// A NodeStyle is used to add CSS to a Node. It renders to a classDef line. -// Retrieve an instance of NodeStyle via Flowchart's NodeStyle method, do not -// create instances directly. -type NodeStyle struct { - id string - Fill htmlColor // renders to something like fill:#f9f - Stroke htmlColor // renders to something like stroke:#333 - StrokeWidth uint8 // renders to something like stroke-width:2px - StrokeDash uint8 // renders to something like stroke-dasharray:5px - More string // more styles, e.g.: stroke:#333,stroke-width:1px -} - -// ID provides access to the NodeStyle's readonly field id. -func (ns *NodeStyle) ID() (id string) { - return ns.id -} - -// String renders this graph element to a classDef line. -func (ns *NodeStyle) String() (renderedElement string) { - styles := []string{} - if ns.Fill != "" { - styles = append(styles, "fill:"+string(ns.Fill)) - } - if ns.Stroke != "" { - styles = append(styles, "stroke:"+string(ns.Stroke)) - } - if ns.StrokeWidth != 1 { - styles = append(styles, fmt.Sprintf(`stroke-width:%dpx`, - ns.StrokeWidth)) - } - if ns.StrokeDash != 0 { - styles = append(styles, fmt.Sprintf(`stroke-dasharray:%dpx`, - ns.StrokeDash)) - } - if ns.More != "" { - styles = append(styles, ns.More) - } - definitions := strings.Join(styles, ",") - if definitions == "" { - // neutral element as a fallback to ensure empty classDefs don't break - // the mermaid syntax - definitions = fmt.Sprintf(`stroke-width:%dpx`, ns.StrokeWidth) - } - return fmt.Sprintf("classDef %s %s\n", ns.id, definitions) -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Subgraph.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Subgraph.go deleted file mode 100644 index 980ca4cdb4a..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/Subgraph.go +++ /dev/null @@ -1,72 +0,0 @@ -package flowchart - -import ( - "fmt" -) - -// Subgraph represents a subgraph block on the Flowchart graph where nested -// Subgraphs and Nodes can be added. Create an instance of Subgraph via -// Flowchart's or Subgraph's AddSubgraph method, do not create instances -// directly. Already defined IDs can be looked up via Flowchart's GetSubgraph -// method or iterated over via its ListSubgraphs method. -type Subgraph struct { - id string // virtual ID for lookup - flowchart *Flowchart // top lvl pointer - items []graphItem // sub-items to render - Title string // The title of this Subgraph. -} - -// ID provides access to the Subgraph's readonly field id. -func (sg *Subgraph) ID() (id string) { - return sg.id -} - -// Flowchart provides access to the Subgraph's underlying Flowchart to be able -// to access Adder, Getter and Lister methods or to lookup Styles. -func (sg *Subgraph) Flowchart() (topLevel *Flowchart) { - return sg.flowchart -} - -// Implements graphItem, see String() for further details. 
-func (sg *Subgraph) renderGraph() string { - text := fmt.Sprintln("subgraph", sg.Title) - for _, item := range sg.items { - text += item.renderGraph() - } - text += "end\n" - return text -} - -// String renders this graph element to a subgraph block. -func (sg *Subgraph) String() (renderedElement string) { - return sg.renderGraph() -} - -// AddSubgraph is used to add another nested Subgraph below this Subgraph layer. -// If the provided ID already exists, no new Subgraph is created and nil is -// returned. The ID can later be used to lookup the created Subgraph using -// Flowchart's GetSubgraph method. -func (sg *Subgraph) AddSubgraph(id string) (newSubgraph *Subgraph) { - _, alreadyExists := sg.flowchart.subgraphs[id] - if alreadyExists { - return nil - } - s := &Subgraph{id: id, flowchart: sg.flowchart} - sg.flowchart.subgraphs[id] = s - sg.items = append(sg.items, s) - return s -} - -// AddNode is used to add a new Node to this Subgraph layer. If the provided ID -// already exists, no new Node is created and nil is returned. The ID can later -// be used to lookup the created Node using Flowchart's GetNode method. -func (sg *Subgraph) AddNode(id string) (newNode *Node) { - _, alreadyExists := sg.flowchart.nodes[id] - if alreadyExists { - return nil - } - n := &Node{id: id, Shape: NShapeRect} - sg.flowchart.nodes[id] = n - sg.items = append(sg.items, n) - return n -} diff --git a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/doc.go b/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/doc.go deleted file mode 100644 index 6fbf0ee5047..00000000000 --- a/ui/wasm/vendor/github.com/Heiko-san/mermaidgen/flowchart/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Package flowchart is an object oriented approach to define mermaid flowcharts as -defined at https://mermaidjs.github.io/flowchart.html and render them to mermaid -code. - -You use the constructor NewFlowchart to create a new Flowchart object. - - chart := flowchart.NewFlowchart() - -This object is used to construct a graph using various class methods, such as: - - node1 := chart.AddNode("myNodeId1") - node2 := chart.AddNode("myNodeId2") - edge1 := chart.AddEdge(node1, node2) - -Once the graph is completely defined, it can be "rendered" to mermaid code by -stringifying the Flowchart object. - - fmt.Print(chart) - -Which then creates: - - graph TB - myNodeId1["myNodeId1"] - myNodeId2["myNodeId2"] - myNodeId1 --> myNodeId2 - -The package supports defining CSS styles, that can be assigned to Nodes and -Edges. - - ns1 := chart.NodeStyle("myStyleId1") - ns1.StrokeWidth = 3 - ns1.StrokeDash = 5 - es1 := chart.EdgeStyle("myStyleId1") - es1.Stroke = "#f00" - es1.StrokeWidth = 2 - node1.Style = ns1 - node2.Style = chart.NodeStyle("myStyleId1") - edge1.Style = es1 - -There are various useful constants and further styling options, too. - - es1.Stroke = flowchart.ColorRed - node1.Shape = flowchart.NShapeRoundRect - node2.Shape = flowchart.NShapeCircle - edge1.Shape = flowchart.EShapeThickLine - -Let's add some text and maybe a link. - - node1.AddLines("my body text") - node1.Link = "http://www.example.com" - node1.LinkText = "go to example" - -What we have by now is: - - graph TB - classDef myStyleId1 stroke-width:3px,stroke-dasharray:5px - myNodeId1("my body text") - class myNodeId1 myStyleId1 - click myNodeId1 "http://www.example.com" "go to example" - myNodeId2(("myNodeId2")) - class myNodeId2 myStyleId1 - myNodeId1 === myNodeId2 - linkStyle 0 stroke-width:2px,stroke:#f00 - -And there is more. Just explore the package. 
Start at Flowchart and proceed to -Subgraph. -*/ -package flowchart diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/.gitignore b/ui/wasm/vendor/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1fdc6d..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/ui/wasm/vendor/github.com/Microsoft/go-winio/CODEOWNERS deleted file mode 100644 index ae1b4942b91..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/CODEOWNERS +++ /dev/null @@ -1 +0,0 @@ - * @microsoft/containerplat diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/LICENSE b/ui/wasm/vendor/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d7746..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/README.md b/ui/wasm/vendor/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 683be1dcf9c..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -## Contributing - -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. - -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. 
You will only need to do this once across all repos using our CLA. - -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. - - -## Code of Conduct - -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). -For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or -contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. - - - -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/backup.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index 2be34af4310..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build windows - -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - "unicode/utf16" -) - -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamId struct { - StreamId uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. -type BackupStreamReader struct { - r io.Reader - bytesLeft int64 -} - -// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. -func NewBackupStreamReader(r io.Reader) *BackupStreamReader { - return &BackupStreamReader{r, 0} -} - -// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if -// it was not completely read. -func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { - if s, ok := r.r.(io.Seeker); ok { - // Make sure Seek on io.SeekCurrent sometimes succeeds - // before trying the actual seek. 
- if _, err := s.Seek(0, io.SeekCurrent); err == nil { - if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { - return nil, err - } - r.bytesLeft = 0 - } - } - if _, err := io.Copy(ioutil.Discard, r); err != nil { - return nil, err - } - } - var wsi win32StreamId - if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { - return nil, err - } - hdr := &BackupHeader{ - Id: wsi.StreamId, - Attributes: wsi.Attributes, - Size: int64(wsi.Size), - } - if wsi.NameSize != 0 { - name := make([]uint16, int(wsi.NameSize/2)) - if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { - return nil, err - } - hdr.Name = syscall.UTF16ToString(name) - } - if wsi.StreamId == BackupSparseBlock { - if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { - return nil, err - } - hdr.Size -= 8 - } - r.bytesLeft = hdr.Size - return hdr, nil -} - -// Read reads from the current backup stream. -func (r *BackupStreamReader) Read(b []byte) (int, error) { - if r.bytesLeft == 0 { - return 0, io.EOF - } - if int64(len(b)) > r.bytesLeft { - b = b[:r.bytesLeft] - } - n, err := r.r.Read(b) - r.bytesLeft -= int64(n) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } else if r.bytesLeft == 0 && err == nil { - err = io.EOF - } - return n, err -} - -// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. -type BackupStreamWriter struct { - w io.Writer - bytesLeft int64 -} - -// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. -func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { - return &BackupStreamWriter{w, 0} -} - -// WriteHeader writes the next backup stream header and prepares for calls to Write(). -func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { - if w.bytesLeft != 0 { - return fmt.Errorf("missing %d bytes", w.bytesLeft) - } - name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, - Attributes: hdr.Attributes, - Size: uint64(hdr.Size), - NameSize: uint32(len(name) * 2), - } - if hdr.Id == BackupSparseBlock { - // Include space for the int64 block offset - wsi.Size += 8 - } - if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { - return err - } - if len(name) != 0 { - if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { - return err - } - } - if hdr.Id == BackupSparseBlock { - if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { - return err - } - } - w.bytesLeft = hdr.Size - return nil -} - -// Write writes to the current backup stream. -func (w *BackupStreamWriter) Write(b []byte) (int, error) { - if w.bytesLeft < int64(len(b)) { - return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) - } - n, err := w.w.Write(b) - w.bytesLeft -= int64(n) - return n, err -} - -// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. -type BackupFileReader struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, -// Read will attempt to read the security descriptor of the file. -func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { - r := &BackupFileReader{f, includeSecurity, 0} - return r -} - -// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
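For reference, a hedged, Windows-only sketch chaining BackupFileReader with the BackupStreamReader defined earlier in this file to enumerate a file's backup streams; the path is illustrative:

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\temp\example.txt`) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r := winio.NewBackupFileReader(f, false) // false: skip the security descriptor
	defer r.Close()

	sr := winio.NewBackupStreamReader(r)
	for {
		hdr, err := sr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}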
-func (r *BackupFileReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) - if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} - } - runtime.KeepAlive(r.f) - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees Win32 resources associated with the BackupFileReader. It does not close -// the underlying file. -func (r *BackupFileReader) Close() error { - if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) - runtime.KeepAlive(r.f) - r.ctx = 0 - } - return nil -} - -// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. -type BackupFileWriter struct { - f *os.File - includeSecurity bool - ctx uintptr -} - -// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, -// Write() will attempt to restore the security descriptor from the stream. -func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { - w := &BackupFileWriter{f, includeSecurity, 0} - return w -} - -// Write restores a portion of the file using the provided backup stream. -func (w *BackupFileWriter) Write(b []byte) (int, error) { - var bytesWritten uint32 - err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) - if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} - } - runtime.KeepAlive(w.f) - if int(bytesWritten) != len(b) { - return int(bytesWritten), errors.New("not all bytes could be written") - } - return len(b), nil -} - -// Close frees Win32 resources associated with the BackupFileWriter. It does not -// close the underlying file. -func (w *BackupFileWriter) Close() error { - if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) - runtime.KeepAlive(w.f) - w.ctx = 0 - } - return nil -} - -// OpenForBackup opens a file or directory, potentially skipping access checks if the backup -// or restore privileges have been acquired. -// -// If the file opened was a directory, it cannot be used with Readdir(). -func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/ea.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/ea.go deleted file mode 100644 index 4051c1b33bf..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/ea.go +++ /dev/null @@ -1,137 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "errors" -) - -type fileFullEaInformation struct { - NextEntryOffset uint32 - Flags uint8 - NameLength uint8 - ValueLength uint16 -} - -var ( - fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) - - errInvalidEaBuffer = errors.New("invalid extended attribute buffer") - errEaNameTooLarge = errors.New("extended attribute name too large") - errEaValueTooLarge = errors.New("extended attribute value too large") -) - -// ExtendedAttribute represents a single Windows EA. 
-type ExtendedAttribute struct { - Name string - Value []byte - Flags uint8 -} - -func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { - var info fileFullEaInformation - err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) - if err != nil { - err = errInvalidEaBuffer - return - } - - nameOffset := fileFullEaInformationSize - nameLen := int(info.NameLength) - valueOffset := nameOffset + int(info.NameLength) + 1 - valueLen := int(info.ValueLength) - nextOffset := int(info.NextEntryOffset) - if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { - err = errInvalidEaBuffer - return - } - - ea.Name = string(b[nameOffset : nameOffset+nameLen]) - ea.Value = b[valueOffset : valueOffset+valueLen] - ea.Flags = info.Flags - if info.NextEntryOffset != 0 { - nb = b[info.NextEntryOffset:] - } - return -} - -// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION -// buffer retrieved from BackupRead, ZwQueryEaFile, etc. -func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { - for len(b) != 0 { - ea, nb, err := parseEa(b) - if err != nil { - return nil, err - } - - eas = append(eas, ea) - b = nb - } - return -} - -func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { - if int(uint8(len(ea.Name))) != len(ea.Name) { - return errEaNameTooLarge - } - if int(uint16(len(ea.Value))) != len(ea.Value) { - return errEaValueTooLarge - } - entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) - withPadding := (entrySize + 3) &^ 3 - nextOffset := uint32(0) - if !last { - nextOffset = withPadding - } - info := fileFullEaInformation{ - NextEntryOffset: nextOffset, - Flags: ea.Flags, - NameLength: uint8(len(ea.Name)), - ValueLength: uint16(len(ea.Value)), - } - - err := binary.Write(buf, binary.LittleEndian, &info) - if err != nil { - return err - } - - _, err = buf.Write([]byte(ea.Name)) - if err != nil { - return err - } - - err = buf.WriteByte(0) - if err != nil { - return err - } - - _, err = buf.Write(ea.Value) - if err != nil { - return err - } - - _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) - if err != nil { - return err - } - - return nil -} - -// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION -// buffer for use with BackupWrite, ZwSetEaFile, etc. 
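A small round-trip sketch for the two EA helpers above; the attribute name and value are invented:

package main

import (
	"fmt"
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	eas := []winio.ExtendedAttribute{
		{Name: "user.comment", Value: []byte("hello")}, // illustrative EA
	}

	buf, err := winio.EncodeExtendedAttributes(eas)
	if err != nil {
		log.Fatal(err)
	}

	decoded, err := winio.DecodeExtendedAttributes(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded[0].Name, string(decoded[0].Value)) // user.comment hello
}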
-func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { - var buf bytes.Buffer - for i := range eas { - last := false - if i == len(eas)-1 { - last = true - } - - err := writeEa(&buf, &eas[i], last) - if err != nil { - return nil, err - } - } - return buf.Bytes(), nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/file.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index 0385e410812..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,323 +0,0 @@ -// +build windows - -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "sync/atomic" - "syscall" - "time" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult - -type atomicBool int32 - -func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } -func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } -func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } -func (b *atomicBool) swap(new bool) bool { - var newInt int32 - if new { - newInt = 1 - } - return atomic.SwapInt32((*int32)(b), newInt) == 1 -} - -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } - -type timeoutChan chan struct{} - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIo() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. 
-type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - wgLock sync.RWMutex - closing atomicBool - socket bool - readDeadline deadlineHandler - writeDeadline deadlineHandler -} - -type deadlineHandler struct { - setLock sync.Mutex - channel timeoutChan - channelLock sync.RWMutex - timer *time.Timer - timedout atomicBool -} - -// makeWin32File makes a new win32File from an existing file handle -func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIo) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - f.readDeadline.channel = make(timeoutChan) - f.writeDeadline.channel = make(timeoutChan) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - // If we return the result of makeWin32File directly, it can result in an - // interface-wrapped nil, rather than a nil interface value. - f, err := makeWin32File(h) - if err != nil { - return nil, err - } - return f, nil -} - -// closeHandle closes the resources associated with a Win32 handle -func (f *win32File) closeHandle() { - f.wgLock.Lock() - // Atomically set that we are closing, releasing the resources only once. - if !f.closing.swap(true) { - f.wgLock.Unlock() - // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } else { - f.wgLock.Unlock() - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - return nil -} - -// prepareIo prepares for a new IO operation. -// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. -func (f *win32File) prepareIo() (*ioOperation, error) { - f.wgLock.RLock() - if f.closing.isSet() { - f.wgLock.RUnlock() - return nil, ErrFileClosed - } - f.wg.Add(1) - f.wgLock.RUnlock() - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever -func ioCompletionProcessor(h syscall.Handle) { - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// asyncIo processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { - return int(bytes), err - } - - if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) - } - - var timeout timeoutChan - if d != nil { - d.channelLock.Lock() - timeout = d.channel - d.channelLock.Unlock() - } - - var r ioResult - select { - case r = <-c.ch: - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing.isSet() { - err = ErrFileClosed - } - } else if err != nil && f.socket { - // err is from Win32. Query the overlapped structure to get the winsock error. 
- var bytes, flags uint32 - err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) - } - case <-timeout: - cancelIoEx(f.handle, &c.o) - r = <-c.ch - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - err = ErrTimeout - } - } - - // runtime.KeepAlive is needed, as c is passed via native - // code to ioCompletionProcessor, c must remain alive - // until the channel read is complete. - runtime.KeepAlive(c) - return int(r.bytes), err -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.readDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) - runtime.KeepAlive(b) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. -func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - defer f.wg.Done() - - if f.writeDeadline.timedout.isSet() { - return 0, ErrTimeout - } - - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) - runtime.KeepAlive(b) - return n, err -} - -func (f *win32File) SetReadDeadline(deadline time.Time) error { - return f.readDeadline.set(deadline) -} - -func (f *win32File) SetWriteDeadline(deadline time.Time) error { - return f.writeDeadline.set(deadline) -} - -func (f *win32File) Flush() error { - return syscall.FlushFileBuffers(f.handle) -} - -func (f *win32File) Fd() uintptr { - return uintptr(f.handle) -} - -func (d *deadlineHandler) set(deadline time.Time) error { - d.setLock.Lock() - defer d.setLock.Unlock() - - if d.timer != nil { - if !d.timer.Stop() { - <-d.channel - } - d.timer = nil - } - d.timedout.setFalse() - - select { - case <-d.channel: - d.channelLock.Lock() - d.channel = make(chan struct{}) - d.channelLock.Unlock() - default: - } - - if deadline.IsZero() { - return nil - } - - timeoutIO := func() { - d.timedout.setTrue() - close(d.channel) - } - - now := time.Now() - duration := deadline.Sub(now) - if deadline.After(now) { - // Deadline is in the future, set a timer to wait - d.timer = time.AfterFunc(duration, timeoutIO) - } else { - // Deadline is in the past. Cancel all pending IO now. - timeoutIO() - } - return nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/fileinfo.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index 3ab6bff69c5..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build windows - -package winio - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime - FileAttributes uint32 - pad uint32 // padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. 
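A hedged get-modify-set sketch for the GetFileBasicInfo/SetFileBasicInfo pair below; Windows-only, the path is illustrative, and windows.NsecToFiletime is assumed from golang.org/x/sys/windows:

package main

import (
	"log"
	"os"
	"time"

	"github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

func main() {
	f, err := os.OpenFile(`C:\temp\example.txt`, os.O_RDWR, 0) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	bi, err := winio.GetFileBasicInfo(f)
	if err != nil {
		log.Fatal(err)
	}

	// Touch the last-write timestamp, leaving the other attributes unchanged.
	bi.LastWriteTime = windows.NsecToFiletime(time.Now().UnixNano())
	if err := winio.SetFileBasicInfo(f, bi); err != nil {
		log.Fatal(err)
	}
}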
-func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return bi, nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return nil -} - -// FileStandardInfo contains extended information for the file. -// FILE_STANDARD_INFO in WinBase.h -// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info -type FileStandardInfo struct { - AllocationSize, EndOfFile int64 - NumberOfLinks uint32 - DeletePending, Directory bool -} - -// GetFileStandardInfo retrieves ended information for the file. -func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { - si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return si, nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. -func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - runtime.KeepAlive(f) - return fileID, nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/hvsock.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/hvsock.go deleted file mode 100644 index b632f8f8bb9..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/hvsock.go +++ /dev/null @@ -1,307 +0,0 @@ -// +build windows - -package winio - -import ( - "fmt" - "io" - "net" - "os" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio/pkg/guid" -) - -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind - -const ( - afHvSock = 34 // AF_HYPERV - - socketError = ^uintptr(0) -) - -// An HvsockAddr is an address for a AF_HYPERV socket. -type HvsockAddr struct { - VMID guid.GUID - ServiceID guid.GUID -} - -type rawHvsockAddr struct { - Family uint16 - _ uint16 - VMID guid.GUID - ServiceID guid.GUID -} - -// Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { - return "hvsock" -} - -func (addr *HvsockAddr) String() string { - return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) -} - -// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. 
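A short sketch composing an hvsock address with the VsockServiceID helper below; the zero VMID and port 5000 are placeholders, since real callers look up a VM's GUID:

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	addr := &winio.HvsockAddr{
		VMID:      guid.GUID{}, // placeholder; a real VM ID goes here
		ServiceID: winio.VsockServiceID(5000),
	}
	fmt.Println(addr.Network(), addr) // hvsock <vmid>:<serviceid>
}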
-func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") - g.Data1 = port - return g -} - -func (addr *HvsockAddr) raw() rawHvsockAddr { - return rawHvsockAddr{ - Family: afHvSock, - VMID: addr.VMID, - ServiceID: addr.ServiceID, - } -} - -func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { - addr.VMID = raw.VMID - addr.ServiceID = raw.ServiceID -} - -// HvsockListener is a socket listener for the AF_HYPERV address family. -type HvsockListener struct { - sock *win32File - addr HvsockAddr -} - -// HvsockConn is a connected socket of the AF_HYPERV address family. -type HvsockConn struct { - sock *win32File - local, remote HvsockAddr -} - -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) - if err != nil { - return nil, os.NewSyscallError("socket", err) - } - f, err := makeWin32File(fd) - if err != nil { - syscall.Close(fd) - return nil, err - } - f.socket = true - return f, nil -} - -// ListenHvsock listens for connections on the specified hvsock address. -func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { - l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("listen", err) - } - sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("socket", err)) - } - err = syscall.Listen(sock.handle, 16) - if err != nil { - return nil, l.opErr("listen", os.NewSyscallError("listen", err)) - } - return &HvsockListener{sock: sock, addr: *addr}, nil -} - -func (l *HvsockListener) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} -} - -// Addr returns the listener's network address. -func (l *HvsockListener) Addr() net.Addr { - return &l.addr -} - -// Accept waits for the next connection and returns it. -func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() - if err != nil { - return nil, l.opErr("accept", err) - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := l.sock.prepareIo() - if err != nil { - return nil, l.opErr("accept", err) - } - defer l.sock.wg.Done() - - // AcceptEx, per documentation, requires an extra 16 bytes per address. - const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) - var addrbuf [addrlen * 2]byte - - var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { - return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) - } - conn := &HvsockConn{ - sock: sock, - } - conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) - conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) - sock = nil - return conn, nil -} - -// Close closes the listener, causing any pending Accept calls to fail. 
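// A minimal accept-loop sketch (the service port and handler are placeholder
// values, not part of the original file):
//
//	addr := &winio.HvsockAddr{ServiceID: winio.VsockServiceID(5000)}
//	l, err := winio.ListenHvsock(addr)
//	if err == nil {
//		defer l.Close()
//		for {
//			conn, err := l.Accept()
//			if err != nil {
//				break // listener closed or transport error
//			}
//			go handle(conn) // hypothetical connection handler
//		}
//	}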
-func (l *HvsockListener) Close() error { - return l.sock.Close() -} - -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() - if err != nil { - return nil, err - } - defer func() { - if sock != nil { - sock.Close() - } - }() - c, err := sock.prepareIo() - if err != nil { - return nil, err - } - defer sock.wg.Done() - var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) - if err != nil { - return nil, err - } - conn := &HvsockConn{ - sock: sock, - remote: *addr, - } - sock = nil - return conn, nil -} -*/ - -func (conn *HvsockConn) opErr(op string, err error) error { - return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} -} - -func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("read", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var flags, bytes uint32 - err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) - } - return 0, conn.opErr("read", err) - } else if n == 0 { - err = io.EOF - } - return n, err -} - -func (conn *HvsockConn) Write(b []byte) (int, error) { - t := 0 - for len(b) != 0 { - n, err := conn.write(b) - if err != nil { - return t + n, err - } - t += n - b = b[n:] - } - return t, nil -} - -func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() - if err != nil { - return 0, conn.opErr("write", err) - } - defer conn.sock.wg.Done() - buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} - var bytes uint32 - err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) - if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) - } - return 0, conn.opErr("write", err) - } - return n, err -} - -// Close closes the socket connection, failing any pending read or write calls. -func (conn *HvsockConn) Close() error { - return conn.sock.Close() -} - -func (conn *HvsockConn) shutdown(how int) error { - err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) - if err != nil { - return os.NewSyscallError("shutdown", err) - } - return nil -} - -// CloseRead shuts down the read end of the socket. -func (conn *HvsockConn) CloseRead() error { - err := conn.shutdown(syscall.SHUT_RD) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// CloseWrite shuts down the write end of the socket, notifying the other endpoint that -// no more data will be written. -func (conn *HvsockConn) CloseWrite() error { - err := conn.shutdown(syscall.SHUT_WR) - if err != nil { - return conn.opErr("close", err) - } - return nil -} - -// LocalAddr returns the local address of the connection. -func (conn *HvsockConn) LocalAddr() net.Addr { - return &conn.local -} - -// RemoteAddr returns the remote address of the connection. -func (conn *HvsockConn) RemoteAddr() net.Addr { - return &conn.remote -} - -// SetDeadline implements the net.Conn SetDeadline method. 
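// A short sketch: one absolute deadline bounds both directions, since
// SetDeadline below fans out to the read and write deadlines:
//
//	conn.SetDeadline(time.Now().Add(5 * time.Second))
//	n, err := conn.Read(buf) // times out once the deadline passes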
-func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) - return nil -} - -// SetReadDeadline implements the net.Conn SetReadDeadline method. -func (conn *HvsockConn) SetReadDeadline(t time.Time) error { - return conn.sock.SetReadDeadline(t) -} - -// SetWriteDeadline implements the net.Conn SetWriteDeadline method. -func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { - return conn.sock.SetWriteDeadline(t) -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/pipe.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index 96700a73de2..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,517 +0,0 @@ -// +build windows - -package winio - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "os" - "runtime" - "syscall" - "time" - "unsafe" -) - -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW -//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl - -type ioStatusBlock struct { - Status, Information uintptr -} - -type objectAttributes struct { - Length uintptr - RootDirectory uintptr - ObjectName *unicodeString - Attributes uintptr - SecurityDescriptor *securityDescriptor - SecurityQoS uintptr -} - -type unicodeString struct { - Length uint16 - MaximumLength uint16 - Buffer uintptr -} - -type securityDescriptor struct { - Revision byte - Sbz1 byte - Control uint16 - Owner uintptr - Group uintptr - Sacl uintptr - Dacl uintptr -} - -type ntstatus int32 - -func (status ntstatus) Err() error { - if status >= 0 { - return nil - } - return rtlNtStatusToDosError(status) -} - -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - 
cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - err := f.win32File.Flush() - if err != nil { - return err - } - _, err = f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). -func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { - // ERROR_MORE_DATA indicates that the pipe's read mode is message mode - // and the message still has more bytes. Treat this as a success, since - // this package presents all named pipes as byte streams. - err = nil - } - return n, err -} - -func (s pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. -func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { - for { - - select { - case <-ctx.Done(): - return syscall.Handle(0), ctx.Err() - default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err == nil { - return h, nil - } - if err != cERROR_PIPE_BUSY { - return h, &os.PathError{Err: err, Op: "open", Path: *path} - } - // Wait 10 msec and try again. This is a rather simplistic - // view, as we always try each 10 milliseconds. - time.Sleep(10 * time.Millisecond) - } - } -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then we use -// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) 
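// A minimal dial sketch (the pipe name is a placeholder):
//
//	timeout := 500 * time.Millisecond
//	conn, err := winio.DialPipe(`\\.\pipe\mypipe`, &timeout)
//	// On expiry DialPipe returns ErrTimeout, not context.DeadlineExceeded.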
-func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } else { - absTimeout = time.Now().Add(2 * time.Second) - } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) - conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { - return nil, ErrTimeout - } - return conn, err -} - -// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` -// cancellation or timeout. -func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { - return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) -} - -// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` -// cancellation or timeout. -func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { - var err error - var h syscall.Handle - h, err = tryDialPipe(ctx, &path, access) - if err != nil { - return nil, err - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: f, path: path}, - }, nil - } - return &win32Pipe{win32File: f, path: path}, nil -} - -type acceptResponse struct { - f *win32File - err error -} - -type win32PipeListener struct { - firstHandle syscall.Handle - path string - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int -} - -func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - path16, err := syscall.UTF16FromString(path) - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - var oa objectAttributes - oa.Length = unsafe.Sizeof(oa) - - var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - defer localFree(ntPath.Buffer) - oa.ObjectName = &ntPath - - // The security descriptor is only needed for the first pipe. - if first { - if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) - defer localFree(sdb) - copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) - oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) - } else { - // Construct the default named pipe security descriptor. - var dacl uintptr - if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) - } - defer localFree(dacl) - - sdb := &securityDescriptor{ - Revision: 1, - Control: cSE_DACL_PRESENT, - Dacl: dacl, - } - oa.SecurityDescriptor = sdb - } - } - - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) - if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE - } - - disposition := uint32(cFILE_OPEN) - access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) - if first { - disposition = cFILE_CREATE - // By not asking for read or write access, the named pipe file system - // will put this pipe into an initially disconnected state, blocking - // client connections until the next call with first == false. 
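// In other words, the first instance is opened with SYNCHRONIZE access only;
// clients stay blocked until makeServerPipe recreates the pipe with
// GENERIC_READ|GENERIC_WRITE on a later call (first == false).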
- access = syscall.SYNCHRONIZE - } - - timeout := int64(-50 * 10000) // 50ms - - var ( - h syscall.Handle - iosb ioStatusBlock - ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() - if err != nil { - return 0, &os.PathError{Op: "open", Path: path, Err: err} - } - - runtime.KeepAlive(ntPath) - return h, nil -} - -func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, nil, &l.config, false) - if err != nil { - return nil, err - } - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - return f, nil -} - -func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { - p, err := l.makeServerPipe() - if err != nil { - return nil, err - } - - // Wait for the client to connect. - ch := make(chan error) - go func(p *win32File) { - ch <- connectPipe(p) - }(p) - - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { - err = ErrPipeListenerClosed - } - } - return p, err -} - -func (l *win32PipeListener) listenerRoutine() { - closed := false - for !closed { - select { - case <-l.closeCh: - closed = true - case responseCh := <-l.acceptCh: - var ( - p *win32File - err error - ) - for { - p, err = l.makeConnectedServerPipe() - // If the connection was immediately closed by the client, try - // again. - if err != cERROR_NO_DATA { - break - } - } - responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed - } - } - syscall.Close(l.firstHandle) - l.firstHandle = 0 - // Notify Close() and Accept() callers that the handle has been closed. - close(l.doneCh) -} - -// PipeConfig contain configuration for the pipe listener. -type PipeConfig struct { - // SecurityDescriptor contains a Windows security descriptor in SDDL format. - SecurityDescriptor string - - // MessageMode determines whether the pipe is in byte or message mode. In either - // case the pipe is read in byte mode by default. The only practical difference in - // this implementation is that CloseWrite() is only supported for message mode pipes; - // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only - // transferred to the reader (and returned as io.EOF in this implementation) - // when the pipe is in message mode. - MessageMode bool - - // InputBufferSize specifies the size of the input buffer, in bytes. - InputBufferSize int32 - - // OutputBufferSize specifies the size of the output buffer, in bytes. - OutputBufferSize int32 -} - -// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. -// The pipe must not already exist. 
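// A minimal listener sketch (pipe name and buffer sizes are placeholders):
//
//	l, err := winio.ListenPipe(`\\.\pipe\mypipe`, &winio.PipeConfig{
//		MessageMode:      true, // enables CloseWrite on accepted conns
//		InputBufferSize:  65536,
//		OutputBufferSize: 65536,
//	})
//	if err == nil {
//		defer l.Close()
//		conn, _ := l.Accept() // blocks until a client dials
//		_ = conn
//	}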
-func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - l := &win32PipeListener{ - firstHandle: h, - path: path, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIo() - if err != nil { - return err - } - defer p.wg.Done() - - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go deleted file mode 100644 index f497c0e3917..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ /dev/null @@ -1,237 +0,0 @@ -// +build windows - -// Package guid provides a GUID type. The backing structure for a GUID is -// identical to that used by the golang.org/x/sys/windows GUID type. -// There are two main binary encodings used for a GUID, the big-endian encoding, -// and the Windows (mixed-endian) encoding. See here for details: -// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding -package guid - -import ( - "crypto/rand" - "crypto/sha1" - "encoding" - "encoding/binary" - "fmt" - "strconv" - - "golang.org/x/sys/windows" -) - -// Variant specifies which GUID variant (or "type") of the GUID. It determines -// how the entirety of the rest of the GUID is interpreted. -type Variant uint8 - -// The variants specified by RFC 4122. -const ( - // VariantUnknown specifies a GUID variant which does not conform to one of - // the variant encodings specified in RFC 4122. - VariantUnknown Variant = iota - VariantNCS - VariantRFC4122 - VariantMicrosoft - VariantFuture -) - -// Version specifies how the bits in the GUID were generated. For instance, a -// version 4 GUID is randomly generated, and a version 5 is generated from the -// hash of an input string. -type Version uint8 - -var _ = (encoding.TextMarshaler)(GUID{}) -var _ = (encoding.TextUnmarshaler)(&GUID{}) - -// GUID represents a GUID/UUID. It has the same structure as -// golang.org/x/sys/windows.GUID so that it can be used with functions expecting -// that type. It is defined as its own type so that stringification and -// marshaling can be supported. The representation matches that used by native -// Windows code. -type GUID windows.GUID - -// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. 
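// Usage sketch for the two constructors defined below (the namespace value
// is the RFC 4122 DNS namespace, shown purely as an example):
//
//	g4, _ := guid.NewV4() // random
//	ns, _ := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
//	g5, _ := guid.NewV5(ns, []byte("example.com")) // name-based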
-func NewV4() (GUID, error) { - var b [16]byte - if _, err := rand.Read(b[:]); err != nil { - return GUID{}, err - } - - g := FromArray(b) - g.setVersion(4) // Version 4 means randomly generated. - g.setVariant(VariantRFC4122) - - return g, nil -} - -// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) -// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, -// and the sample code treats it as a series of bytes, so we do the same here. -// -// Some implementations, such as those found on Windows, treat the name as a -// big-endian UTF16 stream of bytes. If that is desired, the string can be -// encoded as such before being passed to this function. -func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() - namespaceBytes := namespace.ToArray() - b.Write(namespaceBytes[:]) - b.Write(name) - - a := [16]byte{} - copy(a[:], b.Sum(nil)) - - g := FromArray(a) - g.setVersion(5) // Version 5 means generated from a string. - g.setVariant(VariantRFC4122) - - return g, nil -} - -func fromArray(b [16]byte, order binary.ByteOrder) GUID { - var g GUID - g.Data1 = order.Uint32(b[0:4]) - g.Data2 = order.Uint16(b[4:6]) - g.Data3 = order.Uint16(b[6:8]) - copy(g.Data4[:], b[8:16]) - return g -} - -func (g GUID) toArray(order binary.ByteOrder) [16]byte { - b := [16]byte{} - order.PutUint32(b[0:4], g.Data1) - order.PutUint16(b[4:6], g.Data2) - order.PutUint16(b[6:8], g.Data3) - copy(b[8:16], g.Data4[:]) - return b -} - -// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. -func FromArray(b [16]byte) GUID { - return fromArray(b, binary.BigEndian) -} - -// ToArray returns an array of 16 bytes representing the GUID in big-endian -// encoding. -func (g GUID) ToArray() [16]byte { - return g.toArray(binary.BigEndian) -} - -// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. -func FromWindowsArray(b [16]byte) GUID { - return fromArray(b, binary.LittleEndian) -} - -// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows -// encoding. -func (g GUID) ToWindowsArray() [16]byte { - return g.toArray(binary.LittleEndian) -} - -func (g GUID) String() string { - return fmt.Sprintf( - "%08x-%04x-%04x-%04x-%012x", - g.Data1, - g.Data2, - g.Data3, - g.Data4[:2], - g.Data4[2:]) -} - -// FromString parses a string containing a GUID and returns the GUID. The only -// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` -// format. 
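// Round-trip sketch:
//
//	g, err := guid.FromString("00000050-facb-11e6-bd58-64006a7986d3")
//	// err == nil; g.String() returns the same 36-character lower-case form.
//	_, err = guid.FromString("not-a-guid") // err != nil: invalid GUID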
-func FromString(s string) (GUID, error) { - if len(s) != 36 { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - - var g GUID - - data1, err := strconv.ParseUint(s[0:8], 16, 32) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data1 = uint32(data1) - - data2, err := strconv.ParseUint(s[9:13], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data2 = uint16(data2) - - data3, err := strconv.ParseUint(s[14:18], 16, 16) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data3 = uint16(data3) - - for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { - v, err := strconv.ParseUint(s[x:x+2], 16, 8) - if err != nil { - return GUID{}, fmt.Errorf("invalid GUID %q", s) - } - g.Data4[i] = uint8(v) - } - - return g, nil -} - -func (g *GUID) setVariant(v Variant) { - d := g.Data4[0] - switch v { - case VariantNCS: - d = (d & 0x7f) - case VariantRFC4122: - d = (d & 0x3f) | 0x80 - case VariantMicrosoft: - d = (d & 0x1f) | 0xc0 - case VariantFuture: - d = (d & 0x0f) | 0xe0 - case VariantUnknown: - fallthrough - default: - panic(fmt.Sprintf("invalid variant: %d", v)) - } - g.Data4[0] = d -} - -// Variant returns the GUID variant, as defined in RFC 4122. -func (g GUID) Variant() Variant { - b := g.Data4[0] - if b&0x80 == 0 { - return VariantNCS - } else if b&0xc0 == 0x80 { - return VariantRFC4122 - } else if b&0xe0 == 0xc0 { - return VariantMicrosoft - } else if b&0xe0 == 0xe0 { - return VariantFuture - } - return VariantUnknown -} - -func (g *GUID) setVersion(v Version) { - g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12) -} - -// Version returns the GUID version, as defined in RFC 4122. -func (g GUID) Version() Version { - return Version((g.Data3 & 0xF000) >> 12) -} - -// MarshalText returns the textual representation of the GUID. -func (g GUID) MarshalText() ([]byte, error) { - return []byte(g.String()), nil -} - -// UnmarshalText takes the textual representation of a GUID, and unmarhals it -// into this GUID. 
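// Because GUID implements encoding.TextMarshaler and TextUnmarshaler (see the
// interface assertions above), it also works directly with text-based codecs
// such as encoding/json, e.g.:
//
//	var g guid.GUID
//	err := json.Unmarshal([]byte(`"00000050-facb-11e6-bd58-64006a7986d3"`), &g)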
-func (g *GUID) UnmarshalText(text []byte) error { - g2, err := FromString(string(text)) - if err != nil { - return err - } - *g = g2 - return nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/privilege.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index c3dd7c21769..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,203 +0,0 @@ -// +build windows - -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - SE_PRIVILEGE_ENABLED = 2 - - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" - SeSecurityPrivilege = "SeSecurityPrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - -var ( - privNames = make(map[string]uint64) - privNameMutex sync.Mutex -) - -// PrivilegeError represents an error enabling privileges. -type PrivilegeError struct { - privileges []uint64 -} - -func (e *PrivilegeError) Error() string { - s := "" - if len(e.privileges) > 1 { - s = "Could not enable privileges " - } else { - s = "Could not enable privilege " - } - for i, p := range e.privileges { - if i != 0 { - s += ", " - } - s += `"` - s += getPrivilegeName(p) - s += `"` - } - return s -} - -// RunWithPrivilege enables a single privilege for a function call. -func RunWithPrivilege(name string, fn func() error) error { - return RunWithPrivileges([]string{name}, fn) -} - -// RunWithPrivileges enables privileges for a function call. -func RunWithPrivileges(names []string, fn func() error) error { - privileges, err := mapPrivileges(names) - if err != nil { - return err - } - runtime.LockOSThread() - defer runtime.UnlockOSThread() - token, err := newThreadToken() - if err != nil { - return err - } - defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) - if err != nil { - return err - } - return fn() -} - -func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 - privNameMutex.Lock() - defer privNameMutex.Unlock() - for _, name := range names { - p, ok := privNames[name] - if !ok { - err := lookupPrivilegeValue("", name, &p) - if err != nil { - return nil, err - } - privNames[name] = p - } - privileges = append(privileges, p) - } - return privileges, nil -} - -// EnableProcessPrivileges enables privileges globally for the process. 
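// A minimal sketch using the scoped helper defined above (the callback body
// is a placeholder):
//
//	err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
//		// open files with backup semantics here
//		return nil
//	})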
-func EnableProcessPrivileges(names []string) error {
-	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
-}
-
-// DisableProcessPrivileges disables privileges globally for the process.
-func DisableProcessPrivileges(names []string) error {
-	return enableDisableProcessPrivilege(names, 0)
-}
-
-func enableDisableProcessPrivilege(names []string, action uint32) error {
-	privileges, err := mapPrivileges(names)
-	if err != nil {
-		return err
-	}
-
-	p, _ := windows.GetCurrentProcess()
-	var token windows.Token
-	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
-	if err != nil {
-		return err
-	}
-
-	defer token.Close()
-	return adjustPrivileges(token, privileges, action)
-}
-
-func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
-	var b bytes.Buffer
-	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
-	for _, p := range privileges {
-		binary.Write(&b, binary.LittleEndian, p)
-		binary.Write(&b, binary.LittleEndian, action)
-	}
-	prevState := make([]byte, b.Len())
-	reqSize := uint32(0)
-	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
-	if !success {
-		return err
-	}
-	if err == ERROR_NOT_ALL_ASSIGNED {
-		return &PrivilegeError{privileges}
-	}
-	return nil
-}
-
-func getPrivilegeName(luid uint64) string {
-	var nameBuffer [256]uint16
-	bufSize := uint32(len(nameBuffer))
-	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
-	if err != nil {
-		return fmt.Sprintf("<unknown privilege %d>", luid)
-	}
-
-	var displayNameBuffer [256]uint16
-	displayBufSize := uint32(len(displayNameBuffer))
-	var langID uint32
-	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
-	if err != nil {
-		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
-	}
-
-	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
-}
-
-func newThreadToken() (windows.Token, error) {
-	err := impersonateSelf(securityImpersonation)
-	if err != nil {
-		return 0, err
-	}
-
-	var token windows.Token
-	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
-	if err != nil {
-		rerr := revertToSelf()
-		if rerr != nil {
-			panic(rerr)
-		}
-		return 0, err
-	}
-	return token, nil
-}
-
-func releaseThreadToken(h windows.Token) {
-	err := revertToSelf()
-	if err != nil {
-		panic(err)
-	}
-	h.Close()
-}
diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/reparse.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/reparse.go
deleted file mode 100644
index fc1ee4d3a3e..00000000000
--- a/ui/wasm/vendor/github.com/Microsoft/go-winio/reparse.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package winio
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"strings"
-	"unicode/utf16"
-	"unsafe"
-)
-
-const (
-	reparseTagMountPoint = 0xA0000003
-	reparseTagSymlink    = 0xA000000C
-)
-
-type reparseDataBuffer struct {
-	ReparseTag           uint32
-	ReparseDataLength    uint16
-	Reserved             uint16
-	SubstituteNameOffset uint16
-	SubstituteNameLength uint16
-	PrintNameOffset      uint16
-	PrintNameLength      uint16
-}
-
-// ReparsePoint describes a Win32 symlink or mount point.
-type ReparsePoint struct {
-	Target       string
-	IsMountPoint bool
-}
-
-// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
-// mount point reparse point.
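// Decode/encode round-trip sketch (buf is assumed to hold the raw bytes of a
// REPARSE_DATA_BUFFER read from a reparse point):
//
//	rp, err := winio.DecodeReparsePoint(buf)
//	if err == nil {
//		out := winio.EncodeReparsePoint(rp) // re-encodes target and kind
//		_ = out
//	}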
-type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
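// (The "\x00" runes appended below supply that terminator; the
// SubstituteNameLength and PrintNameLength fields still count only the
// characters before it, hence the "- 1" adjustments further down.)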
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - binary.Write(&b, binary.LittleEndian, flags) - } - - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/sd.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index db1b370a1b5..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,98 +0,0 @@ -// +build windows - -package winio - -import ( - "syscall" - "unsafe" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength - -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch e.Err { - case cERROR_NONE_MAPPED: - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -// LookupSidByName looks up the SID of an account by name -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", 
&AccountLookupError{name, err} - } - sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) - localFree(uintptr(unsafe.Pointer(strBuffer))) - return sid, nil -} - -func SddlToSecurityDescriptor(sddl string) ([]byte, error) { - var sdBuffer uintptr - err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) - if err != nil { - return nil, &SddlConversionError{sddl, err} - } - defer localFree(sdBuffer) - sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) - copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) - return sd, nil -} - -func SecurityDescriptorToSddl(sd []byte) (string, error) { - var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. - // Don't use it. - err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) - if err != nil { - return "", err - } - defer localFree(uintptr(unsafe.Pointer(sddl))) - return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil -} diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/syscall.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/syscall.go deleted file mode 100644 index 5955c99fdea..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/syscall.go +++ /dev/null @@ -1,3 +0,0 @@ -package winio - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/ui/wasm/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/ui/wasm/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go deleted file mode 100644 index 176ff75e320..00000000000 --- a/ui/wasm/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ /dev/null @@ -1,427 +0,0 @@ -// Code generated by 'go generate'; DO NOT EDIT. - -package winio - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. -const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = syscall.EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e syscall.Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) 
- return e -} - -var ( - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") - - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") - procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procLocalAlloc = modkernel32.NewProc("LocalAlloc") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") - procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") - procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") - procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") - procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") -) - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - err = errnoErr(e1) - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func 
convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), 
uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } - r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), 
uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - err = errnoErr(e1) - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = errnoErr(e1) - } - return -} - -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { - r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) - ptr = uintptr(r0) - return -} - -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 
0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { - r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) - return -} - -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) - return -} - -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { - r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) - return -} - -func rtlNtStatusToDosError(status ntstatus) (winerr error) { - r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) - if r0 != 0 { - winerr = syscall.Errno(r0) - } - return -} - -func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } - r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/AUTHORS deleted file mode 100644 index 2b00ddba0df..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at https://tip.golang.org/AUTHORS. diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS deleted file mode 100644 index 1fbd3e976fa..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/LICENSE b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/LICENSE deleted file mode 100644 index 6a66aea5eaf..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. 
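All of the go-winio wrappers removed above share one mkwinsyscall-generated shape: resolve the DLL proc once, invoke it via syscall.Syscall*, and turn a failing primary return value into a Go error through errnoErr. A minimal hand-rolled sketch of that pattern (illustrative only — GetCurrentProcessId is an arbitrary stand-in, not an API touched by this change):

```go
//go:build windows

package main

import (
	"fmt"
	"syscall"
)

var (
	kernel32                = syscall.NewLazyDLL("kernel32.dll")
	procGetCurrentProcessId = kernel32.NewProc("GetCurrentProcessId")
)

func main() {
	// Same shape as the deleted wrappers: call the proc and read the raw
	// return register; fallible APIs would additionally map errno (the e1
	// value) into a Go error when the primary return value signals failure.
	r0, _, _ := procGetCurrentProcessId.Call()
	fmt.Println("pid:", uint32(r0))
}
```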
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/PATENTS b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/PATENTS deleted file mode 100644 index 733099041f8..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go deleted file mode 100644 index 3ed3f435737..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go +++ /dev/null @@ -1,381 +0,0 @@ -package bitcurves - -// Copyright 2010 The Go Authors. All rights reserved. -// Copyright 2011 ThePiachu. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bitelliptic implements several Koblitz elliptic curves over prime -// fields. - -// This package operates, internally, on Jacobian coordinates. For a given -// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1) -// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole -// calculation can be performed within the transform (as in ScalarMult and -// ScalarBaseMult). But even for Add and Double, it's faster to apply and -// reverse the transform than to operate in affine coordinates. - -import ( - "crypto/elliptic" - "io" - "math/big" - "sync" -) - -// A BitCurve represents a Koblitz Curve with a=0. -// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html -type BitCurve struct { - Name string - P *big.Int // the order of the underlying field - N *big.Int // the order of the base point - B *big.Int // the constant of the BitCurve equation - Gx, Gy *big.Int // (x,y) of the base point - BitSize int // the size of the underlying field -} - -// Params returns the parameters of the given BitCurve (see BitCurve struct) -func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) { - cp = new(elliptic.CurveParams) - cp.Name = bitCurve.Name - cp.P = bitCurve.P - cp.N = bitCurve.N - cp.Gx = bitCurve.Gx - cp.Gy = bitCurve.Gy - cp.BitSize = bitCurve.BitSize - return cp -} - -// IsOnCurve returns true if the given (x,y) lies on the BitCurve. -func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { - // y² = x³ + b - y2 := new(big.Int).Mul(y, y) //y² - y2.Mod(y2, bitCurve.P) //y²%P - - x3 := new(big.Int).Mul(x, x) //x² - x3.Mul(x3, x) //x³ - - x3.Add(x3, bitCurve.B) //x³+B - x3.Mod(x3, bitCurve.P) //(x³+B)%P - - return x3.Cmp(y2) == 0 -} - -// affineFromJacobian reverses the Jacobian transform. See the comment at the -// top of the file. -func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { - if z.Cmp(big.NewInt(0)) == 0 { - panic("bitcurve: Can't convert to affine with Jacobian Z = 0") - } - // x = YZ^2 mod P - zinv := new(big.Int).ModInverse(z, bitCurve.P) - zinvsq := new(big.Int).Mul(zinv, zinv) - - xOut = new(big.Int).Mul(x, zinvsq) - xOut.Mod(xOut, bitCurve.P) - // y = YZ^3 mod P - zinvsq.Mul(zinvsq, zinv) - yOut = new(big.Int).Mul(y, zinvsq) - yOut.Mod(yOut, bitCurve.P) - return xOut, yOut -} - -// Add returns the sum of (x1,y1) and (x2,y2) -func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) { - z := new(big.Int).SetInt64(1) - x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z) - return bitCurve.affineFromJacobian(x, y, z) -} - -// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and -// (x2, y2, z2) and returns their sum, also in Jacobian form. 
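For intuition on the Jacobian transform described at the top of the deleted bitcurve.go: a Jacobian point (X, Y, Z) corresponds to the affine point x = X/Z² mod P, y = Y/Z³ mod P, which is what affineFromJacobian computes via Z⁻¹ (the inline "x = YZ^2 mod P" comments in the deleted body don't match the code, which multiplies by zinv², and look like upstream typos). A toy-number check of the conversion, independent of the vendored code:

```go
package main

import (
	"fmt"
	"math/big"
)

// affine recovers (x, y) from Jacobian (X, Y, Z): x = X/Z² mod p, y = Y/Z³ mod p.
func affine(X, Y, Z, p *big.Int) (*big.Int, *big.Int) {
	zinv := new(big.Int).ModInverse(Z, p) // Z⁻¹ mod p
	zinv2 := new(big.Int).Mul(zinv, zinv)
	zinv3 := new(big.Int).Mul(zinv2, zinv)
	x := new(big.Int).Mod(new(big.Int).Mul(X, zinv2), p)
	y := new(big.Int).Mod(new(big.Int).Mul(Y, zinv3), p)
	return x, y
}

func main() {
	p := big.NewInt(23)
	// The affine point (3, 10) lifted with Z = 2: X = 3·2² = 12, Y = 10·2³ mod 23 = 11.
	x, y := affine(big.NewInt(12), big.NewInt(11), big.NewInt(2), p)
	fmt.Println(x, y) // 3 10
}
```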
-func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl - z1z1 := new(big.Int).Mul(z1, z1) - z1z1.Mod(z1z1, bitCurve.P) - z2z2 := new(big.Int).Mul(z2, z2) - z2z2.Mod(z2z2, bitCurve.P) - - u1 := new(big.Int).Mul(x1, z2z2) - u1.Mod(u1, bitCurve.P) - u2 := new(big.Int).Mul(x2, z1z1) - u2.Mod(u2, bitCurve.P) - h := new(big.Int).Sub(u2, u1) - if h.Sign() == -1 { - h.Add(h, bitCurve.P) - } - i := new(big.Int).Lsh(h, 1) - i.Mul(i, i) - j := new(big.Int).Mul(h, i) - - s1 := new(big.Int).Mul(y1, z2) - s1.Mul(s1, z2z2) - s1.Mod(s1, bitCurve.P) - s2 := new(big.Int).Mul(y2, z1) - s2.Mul(s2, z1z1) - s2.Mod(s2, bitCurve.P) - r := new(big.Int).Sub(s2, s1) - if r.Sign() == -1 { - r.Add(r, bitCurve.P) - } - r.Lsh(r, 1) - v := new(big.Int).Mul(u1, i) - - x3 := new(big.Int).Set(r) - x3.Mul(x3, x3) - x3.Sub(x3, j) - x3.Sub(x3, v) - x3.Sub(x3, v) - x3.Mod(x3, bitCurve.P) - - y3 := new(big.Int).Set(r) - v.Sub(v, x3) - y3.Mul(y3, v) - s1.Mul(s1, j) - s1.Lsh(s1, 1) - y3.Sub(y3, s1) - y3.Mod(y3, bitCurve.P) - - z3 := new(big.Int).Add(z1, z2) - z3.Mul(z3, z3) - z3.Sub(z3, z1z1) - if z3.Sign() == -1 { - z3.Add(z3, bitCurve.P) - } - z3.Sub(z3, z2z2) - if z3.Sign() == -1 { - z3.Add(z3, bitCurve.P) - } - z3.Mul(z3, h) - z3.Mod(z3, bitCurve.P) - - return x3, y3, z3 -} - -// Double returns 2*(x,y) -func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) { - z1 := new(big.Int).SetInt64(1) - return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1)) -} - -// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and -// returns its double, also in Jacobian form. -func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) { - // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l - - a := new(big.Int).Mul(x, x) //X1² - b := new(big.Int).Mul(y, y) //Y1² - c := new(big.Int).Mul(b, b) //B² - - d := new(big.Int).Add(x, b) //X1+B - d.Mul(d, d) //(X1+B)² - d.Sub(d, a) //(X1+B)²-A - d.Sub(d, c) //(X1+B)²-A-C - d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C) - - e := new(big.Int).Mul(big.NewInt(3), a) //3*A - f := new(big.Int).Mul(e, e) //E² - - x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D - x3.Sub(f, x3) //F-2*D - x3.Mod(x3, bitCurve.P) - - y3 := new(big.Int).Sub(d, x3) //D-X3 - y3.Mul(e, y3) //E*(D-X3) - y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C - y3.Mod(y3, bitCurve.P) - - z3 := new(big.Int).Mul(y, z) //Y1*Z1 - z3.Mul(big.NewInt(2), z3) //3*Y1*Z1 - z3.Mod(z3, bitCurve.P) - - return x3, y3, z3 -} - -//TODO: double check if it is okay -// ScalarMult returns k*(Bx,By) where k is a number in big-endian form. -func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) { - // We have a slight problem in that the identity of the group (the - // point at infinity) cannot be represented in (x, y) form on a finite - // machine. Thus the standard add/double algorithm has to be tweaked - // slightly: our initial state is not the identity, but x, and we - // ignore the first true bit in |k|. If we don't find any true bits in - // |k|, then we return nil, nil, because we cannot return the identity - // element. 
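The ScalarMult comment above describes MSB-first double-and-add with one twist: because the group identity has no affine representation, the accumulator is seeded with P at the first set bit of k rather than starting from the identity. The same control flow over plain integers, where k·P is trivially checkable (a sketch, not derived from the vendored code):

```go
package main

import "fmt"

// msbDoubleAndAdd scans k MSB-first exactly like the deleted ScalarMult:
// double the accumulator on every bit after the first set bit, add P on
// each set bit, and seed the accumulator with P at the first set bit.
// Over integers under addition, the result must equal k*P.
func msbDoubleAndAdd(P int, k []byte) (int, bool) {
	acc, seen := 0, false
	for _, b := range k {
		for bit := 0; bit < 8; bit++ {
			if seen {
				acc *= 2 // "double"
			}
			if b&0x80 == 0x80 {
				if !seen {
					seen = true // first set bit: start from P, not the identity
					acc = P
				} else {
					acc += P // "add"
				}
			}
			b <<= 1
		}
	}
	return acc, seen // seen == false means k == 0: the unrepresentable identity
}

func main() {
	r, ok := msbDoubleAndAdd(7, []byte{0x0B}) // k = 11
	fmt.Println(r, ok)                        // 77 true
}
```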
- - Bz := new(big.Int).SetInt64(1) - x := Bx - y := By - z := Bz - - seenFirstTrue := false - for _, byte := range k { - for bitNum := 0; bitNum < 8; bitNum++ { - if seenFirstTrue { - x, y, z = bitCurve.doubleJacobian(x, y, z) - } - if byte&0x80 == 0x80 { - if !seenFirstTrue { - seenFirstTrue = true - } else { - x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z) - } - } - byte <<= 1 - } - } - - if !seenFirstTrue { - return nil, nil - } - - return bitCurve.affineFromJacobian(x, y, z) -} - -// ScalarBaseMult returns k*G, where G is the base point of the group and k is -// an integer in big-endian form. -func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) { - return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k) -} - -var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f} - -//TODO: double check if it is okay -// GenerateKey returns a public/private key pair. The private key is generated -// using the given reader, which must return random data. -func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) { - byteLen := (bitCurve.BitSize + 7) >> 3 - priv = make([]byte, byteLen) - - for x == nil { - _, err = io.ReadFull(rand, priv) - if err != nil { - return - } - // We have to mask off any excess bits in the case that the size of the - // underlying field is not a whole number of bytes. - priv[0] &= mask[bitCurve.BitSize%8] - // This is because, in tests, rand will return all zeros and we don't - // want to get the point at infinity and loop forever. - priv[1] ^= 0x42 - x, y = bitCurve.ScalarBaseMult(priv) - } - return -} - -// Marshal converts a point into the form specified in section 4.3.6 of ANSI -// X9.62. -func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { - byteLen := (bitCurve.BitSize + 7) >> 3 - - ret := make([]byte, 1+2*byteLen) - ret[0] = 4 // uncompressed point - - xBytes := x.Bytes() - copy(ret[1+byteLen-len(xBytes):], xBytes) - yBytes := y.Bytes() - copy(ret[1+2*byteLen-len(yBytes):], yBytes) - return ret -} - -// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On -// error, x = nil. 
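Marshal/Unmarshal above implement the uncompressed-point encoding of ANSI X9.62 section 4.3.6: a 0x04 type byte followed by X and Y as fixed-width big-endian integers. A round-trip sketch of just that layout (byteLen fixed at 32 here, as for a 256-bit curve; illustrative only):

```go
package main

import (
	"fmt"
	"math/big"
)

const byteLen = 32 // coordinate width for a 256-bit curve

// marshal encodes (x, y) as 0x04 || X || Y with left-zero-padded coordinates.
func marshal(x, y *big.Int) []byte {
	out := make([]byte, 1+2*byteLen)
	out[0] = 4 // uncompressed form
	x.FillBytes(out[1 : 1+byteLen])
	y.FillBytes(out[1+byteLen:])
	return out
}

// unmarshal reverses marshal; it returns nil, nil on a malformed encoding.
func unmarshal(data []byte) (x, y *big.Int) {
	if len(data) != 1+2*byteLen || data[0] != 4 {
		return nil, nil
	}
	x = new(big.Int).SetBytes(data[1 : 1+byteLen])
	y = new(big.Int).SetBytes(data[1+byteLen:])
	return x, y
}

func main() {
	x, y := unmarshal(marshal(big.NewInt(27), big.NewInt(39)))
	fmt.Println(x, y) // 27 39
}
```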
-func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { - byteLen := (bitCurve.BitSize + 7) >> 3 - if len(data) != 1+2*byteLen { - return - } - if data[0] != 4 { // uncompressed form - return - } - x = new(big.Int).SetBytes(data[1 : 1+byteLen]) - y = new(big.Int).SetBytes(data[1+byteLen:]) - return -} - -//curve parameters taken from: -//http://www.secg.org/collateral/sec2_final.pdf - -var initonce sync.Once -var secp160k1 *BitCurve -var secp192k1 *BitCurve -var secp224k1 *BitCurve -var secp256k1 *BitCurve - -func initAll() { - initS160() - initS192() - initS224() - initS256() -} - -func initS160() { - // See SEC 2 section 2.4.1 - secp160k1 = new(BitCurve) - secp160k1.Name = "secp160k1" - secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) - secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) - secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) - secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) - secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) - secp160k1.BitSize = 160 -} - -func initS192() { - // See SEC 2 section 2.5.1 - secp192k1 = new(BitCurve) - secp192k1.Name = "secp192k1" - secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) - secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) - secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) - secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) - secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) - secp192k1.BitSize = 192 -} - -func initS224() { - // See SEC 2 section 2.6.1 - secp224k1 = new(BitCurve) - secp224k1.Name = "secp224k1" - secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) - secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) - secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) - secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) - secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) - secp224k1.BitSize = 224 -} - -func initS256() { - // See SEC 2 section 2.7.1 - secp256k1 = new(BitCurve) - secp256k1.Name = "secp256k1" - secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) - secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) - secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) - secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) - secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) - secp256k1.BitSize = 256 -} - -// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) -func S160() *BitCurve { - initonce.Do(initAll) - return secp160k1 -} - -// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) -func S192() *BitCurve { - initonce.Do(initAll) - return secp192k1 -} - -// S224 returns a BitCurve which 
implements secp224k1 (see SEC 2 section 2.6.1) -func S224() *BitCurve { - initonce.Do(initAll) - return secp224k1 -} - -// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1) -func S256() *BitCurve { - initonce.Do(initAll) - return secp256k1 -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go deleted file mode 100644 index 77fb8b9a046..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go +++ /dev/null @@ -1,134 +0,0 @@ -// Package brainpool implements Brainpool elliptic curves. -// Implementation of rcurves is from github.com/ebfe/brainpool -// Note that these curves are implemented with naive, non-constant time operations -// and are likely not suitable for enviroments where timing attacks are a concern. -package brainpool - -import ( - "crypto/elliptic" - "math/big" - "sync" -) - -var ( - once sync.Once - p256t1, p384t1, p512t1 *elliptic.CurveParams - p256r1, p384r1, p512r1 *rcurve -) - -func initAll() { - initP256t1() - initP384t1() - initP512t1() - initP256r1() - initP384r1() - initP512r1() -} - -func initP256t1() { - p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} - p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) - p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) - p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) - p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) - p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) - p256t1.BitSize = 256 -} - -func initP256r1() { - twisted := p256t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP256r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) - params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) - z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) - p256r1 = newrcurve(twisted, params, z) -} - -func initP384t1() { - p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} - p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) - p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) - p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) - p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) - p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) - p384t1.BitSize = 384 -} - -func initP384r1() { - twisted := p384t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP384r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) - params.Gy, _ = 
new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) - z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) - p384r1 = newrcurve(twisted, params, z) -} - -func initP512t1() { - p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} - p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) - p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) - p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) - p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) - p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) - p512t1.BitSize = 512 -} - -func initP512r1() { - twisted := p512t1 - params := &elliptic.CurveParams{ - Name: "brainpoolP512r1", - P: twisted.P, - N: twisted.N, - BitSize: twisted.BitSize, - } - params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) - params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) - z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) - p512r1 = newrcurve(twisted, params, z) -} - -// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) -func P256t1() elliptic.Curve { - once.Do(initAll) - return p256t1 -} - -// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) -func P256r1() elliptic.Curve { - once.Do(initAll) - return p256r1 -} - -// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) -func P384t1() elliptic.Curve { - once.Do(initAll) - return p384t1 -} - -// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) -func P384r1() elliptic.Curve { - once.Do(initAll) - return p384r1 -} - -// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) -func P512t1() elliptic.Curve { - once.Do(initAll) - return p512t1 -} - -// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) -func P512r1() elliptic.Curve { - once.Do(initAll) - return p512r1 -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go deleted file mode 100644 index 2d5355085f2..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go +++ /dev/null @@ -1,83 +0,0 @@ -package brainpool - -import ( - "crypto/elliptic" - "math/big" -) - -var _ elliptic.Curve = (*rcurve)(nil) - -type rcurve struct { - twisted elliptic.Curve - params *elliptic.CurveParams - z *big.Int - zinv *big.Int - z2 *big.Int - z3 *big.Int - zinv2 
*big.Int - zinv3 *big.Int -} - -var ( - two = big.NewInt(2) - three = big.NewInt(3) -) - -func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { - zinv := new(big.Int).ModInverse(z, params.P) - return &rcurve{ - twisted: twisted, - params: params, - z: z, - zinv: zinv, - z2: new(big.Int).Exp(z, two, params.P), - z3: new(big.Int).Exp(z, three, params.P), - zinv2: new(big.Int).Exp(zinv, two, params.P), - zinv3: new(big.Int).Exp(zinv, three, params.P), - } -} - -func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { - var tx, ty big.Int - tx.Mul(x, curve.z2) - tx.Mod(&tx, curve.params.P) - ty.Mul(y, curve.z3) - ty.Mod(&ty, curve.params.P) - return &tx, &ty -} - -func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { - var x, y big.Int - x.Mul(tx, curve.zinv2) - x.Mod(&x, curve.params.P) - y.Mul(ty, curve.zinv3) - y.Mod(&y, curve.params.P) - return &x, &y -} - -func (curve *rcurve) Params() *elliptic.CurveParams { - return curve.params -} - -func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { - return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) -} - -func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - tx2, ty2 := curve.toTwisted(x2, y2) - return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) -} - -func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) -} - -func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { - tx1, ty1 := curve.toTwisted(x1, y1) - return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) -} - -func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { - return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) -} \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax.go deleted file mode 100644 index 6b6bc7aed04..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -// Package eax provides an implementation of the EAX -// (encrypt-authenticate-translate) mode of operation, as described in -// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS -// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." -// In FSE'04, volume 3017 of LNCS, 2004 -package eax - -import ( - "crypto/cipher" - "crypto/subtle" - "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" -) - -const ( - defaultTagSize = 16 - defaultNonceSize = 16 -) - -type eax struct { - block cipher.Block // Only AES-{128, 192, 256} supported - tagSize int // At least 12 bytes recommended - nonceSize int -} - -func (e *eax) NonceSize() int { - return e.nonceSize -} - -func (e *eax) Overhead() int { - return e.tagSize -} - -// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and -// tag lengths. Supports {128, 192, 256}- bit key length. -func NewEAX(block cipher.Block) (cipher.AEAD, error) { - return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) -} - -// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and -// given nonce and tag lengths in bytes. Panics on zero nonceSize and -// exceedingly long tags. -// -// It is recommended to use at least 12 bytes as tag length (see, for instance, -// NIST SP 800-38D). 
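Since the deleted eax package implements the standard cipher.AEAD interface, callers only ever see the usual Seal/Open round trip. A minimal usage sketch against the upstream github.com/ProtonMail/go-crypto/eax package (all-zero key and nonce purely for illustration — a real nonce must be unique per key):

```go
package main

import (
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/eax"
)

func main() {
	key := make([]byte, 16)   // AES-128; fixed key for illustration only
	nonce := make([]byte, 16) // EAX default nonce size; must never repeat per key
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := eax.NewEAX(block)
	if err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("hello"), []byte("associated data"))
	pt, err := aead.Open(nil, nonce, ct, []byte("associated data"))
	fmt.Println(string(pt), err) // hello <nil>
}
```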
-// -// Only to be used for compatibility with existing cryptosystems with -// non-standard parameters. For all other cases, prefer NewEAX. -func NewEAXWithNonceAndTagSize( - block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { - if nonceSize < 1 { - return nil, eaxError("Cannot initialize EAX with nonceSize = 0") - } - if tagSize > block.BlockSize() { - return nil, eaxError("Custom tag length exceeds blocksize") - } - return &eax{ - block: block, - tagSize: tagSize, - nonceSize: nonceSize, - }, nil -} - -func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { - if len(nonce) > e.nonceSize { - panic("crypto/eax: Nonce too long for this instance") - } - ret, out := byteutil.SliceForAppend(dst, len(plaintext) + e.tagSize) - omacNonce := e.omacT(0, nonce) - omacAdata := e.omacT(1, adata) - - // Encrypt message using CTR mode and omacNonce as IV - ctr := cipher.NewCTR(e.block, omacNonce) - ciphertextData := out[:len(plaintext)] - ctr.XORKeyStream(ciphertextData, plaintext) - - omacCiphertext := e.omacT(2, ciphertextData) - - tag := out[len(plaintext):] - for i := 0; i < e.tagSize; i++ { - tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] - } - return ret -} - -func (e* eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { - if len(nonce) > e.nonceSize { - panic("crypto/eax: Nonce too long for this instance") - } - if len(ciphertext) < e.tagSize { - return nil, eaxError("Ciphertext shorter than tag length") - } - sep := len(ciphertext) - e.tagSize - - // Compute tag - omacNonce := e.omacT(0, nonce) - omacAdata := e.omacT(1, adata) - omacCiphertext := e.omacT(2, ciphertext[:sep]) - - tag := make([]byte, e.tagSize) - for i := 0; i < e.tagSize; i++ { - tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] - } - - // Compare tags - if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { - return nil, eaxError("Tag authentication failed") - } - - // Decrypt ciphertext - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) - ctr := cipher.NewCTR(e.block, omacNonce) - ctr.XORKeyStream(out, ciphertext[:sep]) - - return ret[:sep], nil -} - -// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) -func (e *eax) omacT(t byte, plaintext []byte) []byte { - blockSize := e.block.BlockSize() - byteT := make([]byte, blockSize) - byteT[blockSize-1] = t - concat := append(byteT, plaintext...) - return e.omac(concat) -} - -func (e *eax) omac(plaintext []byte) []byte { - blockSize := e.block.BlockSize() - // L ← E_K(0^n); B ← 2L; P ← 4L - L := make([]byte, blockSize) - e.block.Encrypt(L, L) - B := byteutil.GfnDouble(L) - P := byteutil.GfnDouble(B) - - // CBC with IV = 0 - cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) - padded := e.pad(plaintext, B, P) - cbcCiphertext := make([]byte, len(padded)) - cbc.CryptBlocks(cbcCiphertext, padded) - - return cbcCiphertext[len(cbcCiphertext)-blockSize:] -} - -func (e *eax) pad(plaintext, B, P []byte) []byte { - // if |M| in {n, 2n, 3n, ...} - blockSize := e.block.BlockSize() - if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { - return byteutil.RightXor(plaintext, B) - } - - // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P - ending := make([]byte, blockSize-len(plaintext)%blockSize) - ending[0] = 0x80 - padded := append(plaintext, ending...) 
- return byteutil.RightXor(padded, P) -} - -func eaxError(err string) error { - return errors.New("crypto/eax: " + err) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go deleted file mode 100644 index ddb53d07905..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go +++ /dev/null @@ -1,58 +0,0 @@ -package eax - -// Test vectors from -// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf -var testVectors = []struct { - msg, key, nonce, header, ciphertext string -}{ - {"", - "233952DEE4D5ED5F9B9C6D6FF80FF478", - "62EC67F9C3A4A407FCB2A8C49031A8B3", - "6BFB914FD07EAE6B", - "E037830E8389F27B025A2D6527E79D01"}, - {"F7FB", - "91945D3F4DCBEE0BF45EF52255F095A4", - "BECAF043B0A23D843194BA972C66DEBD", - "FA3BFD4806EB53FA", - "19DD5C4C9331049D0BDAB0277408F67967E5"}, - {"1A47CB4933", - "01F74AD64077F2E704C0F60ADA3DD523", - "70C3DB4F0D26368400A10ED05D2BFF5E", - "234A3463C1264AC6", - "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, - {"481C9E39B1", - "D07CF6CBB7F313BDDE66B727AFD3C5E8", - "8408DFFF3C1A2B1292DC199E46B7D617", - "33CCE2EABFF5A79D", - "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, - {"40D0C07DA5E4", - "35B6D0580005BBC12B0587124557D2C2", - "FDB6B06676EEDC5C61D74276E1F8E816", - "AEB96EAEBE2970E9", - "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, - {"4DE3B35C3FC039245BD1FB7D", - "BD8E6E11475E60B268784C38C62FEB22", - "6EAC5C93072D8E8513F750935E46DA1B", - "D4482D1CA78DCE0F", - "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, - {"8B0A79306C9CE7ED99DAE4F87F8DD61636", - "7C77D6E813BED5AC98BAA417477A2E7D", - "1A8C98DCD73D38393B2BF1569DEEFC19", - "65D2017990D62528", - "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, - {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", - "5FFF20CAFAB119CA2FC73549E20F5B0D", - "DDE59B97D722156D4D9AFF2BC7559826", - "54B9F04E6A09189A", - "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, - {"6CF36720872B8513F6EAB1A8A44438D5EF11", - "A4A4782BCFFD3EC5E7EF6D8C34A56123", - "B781FCF2F75FA5A8DE97A9CA48E522EC", - "899A175897561D7E", - "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, - {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", - "8395FCF1E95BEBD697BD010BC766AAC3", - "22E7ADD93CFC6393C57EC0B3C17D6B44", - "126735FCC320D25A", - "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go deleted file mode 100644 index 4eb19f28d9c..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go +++ /dev/null @@ -1,131 +0,0 @@ -// These vectors include key length in {128, 192, 256}, tag size 128, and -// random nonce, header, and plaintext lengths. - -// This file was automatically generated. 
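The eax_test_vectors.go entries deleted above are the published vectors from the EAX paper, stored as (msg, key, nonce, header, ciphertext) hex tuples. A sketch that replays the first (empty-message) vector through the public API, with the hex strings copied from the table above:

```go
package main

import (
	"bytes"
	"crypto/aes"
	"encoding/hex"
	"fmt"

	"github.com/ProtonMail/go-crypto/eax"
)

func main() {
	// First vector above: empty message, so the ciphertext is just the tag.
	key, _ := hex.DecodeString("233952DEE4D5ED5F9B9C6D6FF80FF478")
	nonce, _ := hex.DecodeString("62EC67F9C3A4A407FCB2A8C49031A8B3")
	header, _ := hex.DecodeString("6BFB914FD07EAE6B")
	want, _ := hex.DecodeString("E037830E8389F27B025A2D6527E79D01")

	block, _ := aes.NewCipher(key)
	aead, _ := eax.NewEAX(block)
	got := aead.Seal(nil, nonce, nil, header)
	fmt.Println(bytes.Equal(got, want)) // true
}
```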
- -package eax - -var randomVectors = []struct { - key, nonce, header, plaintext, ciphertext string -}{ - {"DFDE093F36B0356E5A81F609786982E3", - "1D8AC604419001816905BA72B14CED7E", - "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", - "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", - "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, - {"F10619EF02E5D94D7550EB84ED364A21", - "8DC0D4F2F745BBAE835CC5574B942D20", - "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", - "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", - "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, - {"429F514EFC64D98A698A9247274CFF45", - "976AA5EB072F912D126ACEBC954FEC38", - "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", - "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", - "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, - {"398138F309085F47F8457CDF53895A63", - "F8A8A7F2D28E5FFF7BBC2F24353F7A36", - "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", - "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", - "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, - {"7A4151EBD3901B42CBA45DAFB2E931BA", - "0FC88ACEE74DD538040321C330974EB8", - "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", - "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", - "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, - {"BFB147E1CD5459424F8C0271FC0E0DC5", - "EABCC126442BF373969EA3015988CC45", - "4C0880E1D71AA2C7", - "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", - "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, - {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", - "0F84B5D36CF4BC3B863313AF3B4D2E97", - "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", - "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", - "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, - {"44E6F2DC8FDC778AD007137D11410F50", - "270A237AD977F7187AA6C158A0BAB24F", - "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", - 
"8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", - "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, - {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", - "8B4BBE26CCD9859DCD84884159D6B0A4", - "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", - "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", - "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, - {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", - "92745C018AA708ECFEB1667E9F3F1B01", - "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", - "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", - "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, - {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", - "0F45CF7F0BF31CCEB85D9DA10F4D749F", - "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", - "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", - "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, - {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", - "5B11EA1D6008EBB41CF892FCA5B943D1", - "BAF4FF5C8242", - "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", - "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, - {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", - "FD82B5B31804FF47D44199B533D0CF84", - "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", - "B804", - "164BB965C05EBE0931A1A63293EDF9C38C27"}, - {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", - "343BE00DA9483F05C14F2E9EB8EA6AE8", - "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", - "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", - 
"E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, - {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", - "E280E13D7367042E3AA09A80111B6184", - "21486C9D7A9647", - "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", - "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, - {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", - "656B41CB3F9CF8C08BAD7EBFC80BD225", - "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", - "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", - "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, - {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", - "DA0F3A40983D92F2D4C01FED33C7A192", - "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", - "F37326A80E08", - "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, - {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", - "3EE1182AEBB19A02B128F28E1D5F7F99", - "D9F35ABB16D776CE", - "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", - "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, - {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", - "336F5D681E0410FAE7B607246092C6DC", - "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", - "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", - "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, - {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", - "AB91F7245DDCE3F1C747872D47BE0A8A", - "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", - "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", - "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, - {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", - "C691294EF67CD04D1B9242AF83DD1421", - "879334DAE3", - "1E17F46A98FEF5CBB40759D95354", - "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, - {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", - "4228DA0678CA3534588859E77DFF014C", - "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", - 
"23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", - "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, - {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", - "943188480C99437495958B0AE4831AA9", - "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", - "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", - "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, - {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", - "B0490F5BD68A52459556B3749ACDF40E", - "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", - "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", - "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go deleted file mode 100644 index a6bdf51232e..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG -// This file contains necessary tools for the aex and ocb packages. -// -// These functions SHOULD NOT be used elsewhere, since they are optimized for -// specific input nature in the EAX and OCB modes of operation. - -package byteutil - -// GfnDouble computes 2 * input in the field of 2^n elements. -// The irreducible polynomial in the finite field for n=128 is -// x^128 + x^7 + x^2 + x + 1 (equals 0x87) -// Constant-time execution in order to avoid side-channel attacks -func GfnDouble(input []byte) []byte { - if len(input) != 16 { - panic("Doubling in GFn only implemented for n = 128") - } - // If the first bit is zero, return 2L = L << 1 - // Else return (L << 1) xor 0^120 10000111 - shifted := ShiftBytesLeft(input) - shifted[15] ^= ((input[0] >> 7) * 0x87) - return shifted -} - -// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. -func ShiftBytesLeft(x []byte) []byte { - l := len(x) - dst := make([]byte, l) - for i := 0; i < l-1; i++ { - dst[i] = (x[i] << 1) | (x[i+1] >> 7) - } - dst[l-1] = x[l-1] << 1 - return dst -} - -// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. -func ShiftNBytesLeft(dst, x []byte, n int) { - // Erase first n / 8 bytes - copy(dst, x[n/8:]) - - // Shift the remaining n % 8 bits - bits := uint(n % 8) - l := len(dst) - for i := 0; i < l-1; i++ { - dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) - } - dst[l-1] = dst[l-1] << bits - - // Append trailing zeroes - dst = append(dst, make([]byte, n/8)...) 
-} - -// XorBytesMut assumes equal input length, replaces X with X XOR Y -func XorBytesMut(X, Y []byte) { - for i := 0; i < len(X); i++ { - X[i] ^= Y[i] - } -} - - -// XorBytes assumes equal input length, puts X XOR Y into Z -func XorBytes(Z, X, Y []byte) { - for i := 0; i < len(X); i++ { - Z[i] = X[i] ^ Y[i] - } -} - -// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) -func RightXor(X, Y []byte) []byte { - offset := len(X) - len(Y) - xored := make([]byte, len(X)); - copy(xored, X) - for i := 0; i < len(Y); i++ { - xored[offset + i] ^= Y[i] - } - return xored -} - -// SliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func SliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} - diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go deleted file mode 100644 index 7f78cfa759b..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -// Package ocb provides an implementation of the OCB (offset codebook) mode of -// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, -// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT -// AUTHENTICATED ENCRYPTION (2003). -// Security considerations (from RFC-7253): A private key MUST NOT be used to -// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a -// brute-force forging adversary succeeds after 2^{tag length} attempts). A -// single key SHOULD NOT be used to decrypt ciphertext with different tag -// lengths. Nonces need not be secret, but MUST NOT be reused. -// This package only supports underlying block ciphers with 128-bit blocks, -// such as AES-{128, 192, 256}, but may be extended to other sizes. -package ocb - -import ( - "bytes" - "crypto/cipher" - "crypto/subtle" - "errors" - "github.com/ProtonMail/go-crypto/internal/byteutil" - "math/bits" -) - -type ocb struct { - block cipher.Block - tagSize int - nonceSize int - mask mask - // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' - // internal variable can be reused for en/decrypting with nonces sharing - // all but the last 6 bits with N. The prefix of the first nonce used to - // compute the new Ktop, and the Ktop value itself, are stored in - // reusableKtop. If using incremental nonces, this saves one block cipher - // call every 63 out of 64 OCB encryptions, and stores one nonce and one - // output of the block cipher in memory only. - reusableKtop reusableKtop -} - -type mask struct { - // L_*, L_$, (L_i)_{i ∈ N} - lAst []byte - lDol []byte - L [][]byte -} - -type reusableKtop struct { - noncePrefix []byte - Ktop []byte -} - -const ( - defaultTagSize = 16 - defaultNonceSize = 15 -) - -const ( - enc = iota - dec -) - -func (o *ocb) NonceSize() int { - return o.nonceSize -} - -func (o *ocb) Overhead() int { - return o.tagSize -} - -// NewOCB returns an OCB instance with the given block cipher and default -// tag and nonce sizes. 
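As with eax, the deleted ocb package is consumed purely through cipher.AEAD; the practical differences are the 15-byte default nonce and the RFC 7253 limits quoted in the package comment (no key may encrypt more than 2^48 blocks, and nonces must never repeat). A minimal sketch, assuming the upstream github.com/ProtonMail/go-crypto/ocb package path:

```go
package main

import (
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	key := make([]byte, 32)   // AES-256; fixed key for illustration only
	nonce := make([]byte, 15) // OCB default nonce size; must be unique per key
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := ocb.NewOCB(block)
	if err != nil {
		panic(err)
	}
	ct := aead.Seal(nil, nonce, []byte("attack at dawn"), nil)
	pt, err := aead.Open(nil, nonce, ct, nil)
	fmt.Println(string(pt), err) // attack at dawn <nil>
}
```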
-func NewOCB(block cipher.Block) (cipher.AEAD, error) { - return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) -} - -// NewOCBWithNonceAndTagSize returns an OCB instance with the given block -// cipher, nonce length, and tag length. Panics on zero nonceSize and -// exceedingly long tag size. -// -// It is recommended to use at least 12 bytes as tag length. -func NewOCBWithNonceAndTagSize( - block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { - if block.BlockSize() != 16 { - return nil, ocbError("Block cipher must have 128-bit blocks") - } - if nonceSize < 1 { - return nil, ocbError("Incorrect nonce length") - } - if nonceSize >= block.BlockSize() { - return nil, ocbError("Nonce length exceeds blocksize - 1") - } - if tagSize > block.BlockSize() { - return nil, ocbError("Custom tag length exceeds blocksize") - } - return &ocb{ - block: block, - tagSize: tagSize, - nonceSize: nonceSize, - mask: initializeMaskTable(block), - reusableKtop: reusableKtop{ - noncePrefix: nil, - Ktop: nil, - }, - }, nil -} - -func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { - if len(nonce) > o.nonceSize { - panic("crypto/ocb: Incorrect nonce length given to OCB") - } - ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) - o.crypt(enc, out, nonce, adata, plaintext) - return ret -} - -func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { - if len(nonce) > o.nonceSize { - panic("Nonce too long for this instance") - } - if len(ciphertext) < o.tagSize { - return nil, ocbError("Ciphertext shorter than tag length") - } - sep := len(ciphertext) - o.tagSize - ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) - ciphertextData := ciphertext[:sep] - tag := ciphertext[sep:] - o.crypt(dec, out, nonce, adata, ciphertextData) - if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { - ret = ret[:sep] - return ret, nil - } - for i := range out { - out[i] = 0 - } - return nil, ocbError("Tag authentication failed") -} - -// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) -// function. It returns the resulting plain/ciphertext with the tag appended. -func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { - // - // Consider X as a sequence of 128-bit blocks - // - // Note: For encryption (resp. decryption), X is the plaintext (resp., the - // ciphertext without the tag). - blockSize := o.block.BlockSize() - - // - // Nonce-dependent and per-encryption variables - // - // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop - // is already computed. - truncatedNonce := make([]byte, len(nonce)) - copy(truncatedNonce, nonce) - truncatedNonce[len(truncatedNonce)-1] &= 192 - Ktop := make([]byte, blockSize) - if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { - Ktop = o.reusableKtop.Ktop - } else { - // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N - paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) - paddedNonce = append(paddedNonce, truncatedNonce...) - paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) - // Last 6 bits of paddedNonce are already zero. 
Encrypt into Ktop - paddedNonce[blockSize-1] &= 192 - Ktop = paddedNonce - o.block.Encrypt(Ktop, Ktop) - o.reusableKtop.noncePrefix = truncatedNonce - o.reusableKtop.Ktop = Ktop - } - - // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) - xorHalves := make([]byte, blockSize/2) - byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) - stretch := append(Ktop, xorHalves...) - bottom := int(nonce[len(nonce)-1] & 63) - offset := make([]byte, len(stretch)) - byteutil.ShiftNBytesLeft(offset, stretch, bottom) - offset = offset[:blockSize] - - // - // Process any whole blocks - // - // Note: For encryption Y is ciphertext || tag, for decryption Y is - // plaintext || tag. - checksum := make([]byte, blockSize) - m := len(X) / blockSize - for i := 0; i < m; i++ { - index := bits.TrailingZeros(uint(i + 1)) - if len(o.mask.L)-1 < index { - o.mask.extendTable(index) - } - byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) - blockX := X[i*blockSize : (i+1)*blockSize] - blockY := Y[i*blockSize : (i+1)*blockSize] - byteutil.XorBytes(blockY, blockX, offset) - switch instruction { - case enc: - o.block.Encrypt(blockY, blockY) - byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockX) - case dec: - o.block.Decrypt(blockY, blockY) - byteutil.XorBytesMut(blockY, offset) - byteutil.XorBytesMut(checksum, blockY) - } - } - // - // Process any final partial block and compute raw tag - // - tag := make([]byte, blockSize) - if len(X)%blockSize != 0 { - byteutil.XorBytesMut(offset, o.mask.lAst) - pad := make([]byte, blockSize) - o.block.Encrypt(pad, offset) - chunkX := X[blockSize*m:] - chunkY := Y[blockSize*m : len(X)] - byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) - // P_* || bit(1) || zeroes(127) - len(P_*) - switch instruction { - case enc: - paddedY := append(chunkX, byte(128)) - paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) - byteutil.XorBytesMut(checksum, paddedY) - case dec: - paddedX := append(chunkY, byte(128)) - paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) - byteutil.XorBytesMut(checksum, paddedX) - } - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) - } else { - byteutil.XorBytes(tag, checksum, offset) - byteutil.XorBytesMut(tag, o.mask.lDol) - o.block.Encrypt(tag, tag) - byteutil.XorBytesMut(tag, o.hash(adata)) - copy(Y[blockSize*m:], tag[:o.tagSize]) - } - return Y -} - -// This hash function is used to compute the tag. Per design, on empty input it -// returns a slice of zeros, of the same length as the underlying block cipher -// block size. 
-func (o *ocb) hash(adata []byte) []byte { - // - // Consider A as a sequence of 128-bit blocks - // - A := make([]byte, len(adata)) - copy(A, adata) - blockSize := o.block.BlockSize() - - // - // Process any whole blocks - // - sum := make([]byte, blockSize) - offset := make([]byte, blockSize) - m := len(A) / blockSize - for i := 0; i < m; i++ { - chunk := A[blockSize*i : blockSize*(i+1)] - index := bits.TrailingZeros(uint(i + 1)) - // If the mask table is too short - if len(o.mask.L)-1 < index { - o.mask.extendTable(index) - } - byteutil.XorBytesMut(offset, o.mask.L[index]) - byteutil.XorBytesMut(chunk, offset) - o.block.Encrypt(chunk, chunk) - byteutil.XorBytesMut(sum, chunk) - } - - // - // Process any final partial block; compute final hash value - // - if len(A)%blockSize != 0 { - byteutil.XorBytesMut(offset, o.mask.lAst) - // Pad block with 1 || 0 ^ 127 - bitlength(a) - ending := make([]byte, blockSize-len(A)%blockSize) - ending[0] = 0x80 - encrypted := append(A[blockSize*m:], ending...) - byteutil.XorBytesMut(encrypted, offset) - o.block.Encrypt(encrypted, encrypted) - byteutil.XorBytesMut(sum, encrypted) - } - return sum -} - -func initializeMaskTable(block cipher.Block) mask { - // - // Key-dependent variables - // - lAst := make([]byte, block.BlockSize()) - block.Encrypt(lAst, lAst) - lDol := byteutil.GfnDouble(lAst) - L := make([][]byte, 1) - L[0] = byteutil.GfnDouble(lDol) - - return mask{ - lAst: lAst, - lDol: lDol, - L: L, - } -} - -// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) -func (m *mask) extendTable(limit int) { - for i := len(m.L); i <= limit; i++ { - m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) - } -} - -func ocbError(err string) error { - return errors.New("crypto/ocb: " + err) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go deleted file mode 100644 index 0efaf344fd5..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go +++ /dev/null @@ -1,136 +0,0 @@ -// In the test vectors provided by RFC 7253, the "bottom" -// internal variable, which defines "offset" for the first time, does not -// exceed 15. However, it can attain values up to 63. - -// These vectors include key length in {128, 192, 256}, tag size 128, and -// random nonce, header, and plaintext lengths. - -// This file was automatically generated. 
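The OCB code removed above implements the standard cipher.AEAD contract, so the vendored copy is driven exactly like any other AEAD. A minimal round-trip sketch, assuming the vendored import path and an all-zero demo key and nonce (in real use the key is random and a nonce must never repeat per key):

package main

import (
	"bytes"
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/ocb"
)

func main() {
	key := make([]byte, 16) // demo AES-128 key; use random key material in practice
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := ocb.NewOCB(block) // default nonce and tag sizes
	if err != nil {
		panic(err)
	}
	nonce := make([]byte, aead.NonceSize()) // demo nonce; must be unique per key
	adata := []byte("associated data")

	sealed := aead.Seal(nil, nonce, []byte("hello"), adata) // ciphertext || tag
	opened, err := aead.Open(nil, nonce, sealed, adata)     // authenticates before returning
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(opened, []byte("hello"))) // true
}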
- -package ocb - -var randomVectors = []struct { - key, nonce, header, plaintext, ciphertext string -}{ - - {"9438C5D599308EAF13F800D2D31EA7F0", - "C38EE4801BEBFFA1CD8635BE", - "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", - "962D227786FB8913A8BAD5DC3250", - "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, - {"BA7DE631C7D6712167C6724F5B9A2B1D", - "35263EBDA05765DC0E71F1F5", - "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", - "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", - "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, - {"2E74B25289F6FD3E578C24866E9C72A5", - "FD912F15025AF8414642BA1D1D", - "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", - "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", - "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, - {"E7EC507C802528F790AFF5303A017B17", - "4B97A7A568940A9E3CE7A99E93031E", - "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", - "9455B3EA706B74", - "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, - {"6C928AA3224736F28EE7378DE0090191", - "8936138E2E4C6A13280017A1622D", - "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", - "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", - "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, - {"ECEA315CA4B3F425B0C9957A17805EA4", - "664CDAE18403F4F9BA13015A44FC", - "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", - "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", - "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, - {"4EE616C4A58AAA380878F71A373461F6", - "91B8C9C176D9C385E9C47E52", - "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", - "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", - "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, - {"DCD475773136C830D5E3D0C5FE05B7FF", 
- "BB8E1FBB483BE7616A922C4A", - "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", - "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", - "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, - {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", - "FACF804A4BEBF998505FF9DE", - "8213B9263B2971A5BDA18DBD02208EE1", - "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", - "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, - {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", - "0B0EF53D4606B28D1398355F", - "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", - "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", - "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, - {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", - "5C83F4896D0738E1366B1836", - "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", - "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", - "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, - {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", - "36B88F63ADBB5668588181D774", - "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", - "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", - "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, - {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", - "BC099AEB637361BAC536B57618", - "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", - "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", - "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, - {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", - "B1B0AAF373B8B026EB80422051D8", - "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", - 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", - "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, - {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", - "2F2F17CECF7E5A756D10785A3CB9DB", - "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", - "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", - "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, - {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", - "BCA625674F41D1E3AB47672DC0C3", - "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", - "E525422519ECE070E82C", - "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, - {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", - "36D2EC25ADD33CDEDF495205BBC923", - "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", - "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", - "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, - {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", - "81691D5D20EC818FCFF24B33DECC", - "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", - "EB861165DAF7625F827C6B574ED703F03215", - "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, - {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", - "D2A7B94DD12CDACA909D3AD7", - "E021A78F374FC271389AB9A3E97077D755", - "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", - "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, - {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", - "65ED3D63F79F90BBFD19775E", - "336A8C0B7243582A46B221AA677647FCAE91", - "134A8B34824A290E7B", - "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, - {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", - "C391A856B9FE234E14BA1AC7BB40FF", - "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", - "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", - 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, - {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", - "B15B61C8BB39261A8F55AB178EC3", - "D0729B6B75BB", - "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", - "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, - {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", - "01D7BDB1133E9C347486C1EFA6", - "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", - "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", - "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, - {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", - "0C52B3D11D636E5910A4DD76D32C", - "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", - "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", - "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go deleted file mode 100644 index 843085589c8..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go +++ /dev/null @@ -1,78 +0,0 @@ -package ocb - -import ( - "encoding/hex" -) - -// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is -// shared accross tests. 
-var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") - -var rfc7253testVectors = []struct { - nonce, header, plaintext, ciphertext string -}{ - {"BBAA99887766554433221100", - "", - "", - "785407BFFFC8AD9EDCC5520AC9111EE6"}, - {"BBAA99887766554433221101", - "0001020304050607", - "0001020304050607", - "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, - {"BBAA99887766554433221102", - "0001020304050607", - "", - "81017F8203F081277152FADE694A0A00"}, - {"BBAA99887766554433221103", - "", - "0001020304050607", - "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, - {"BBAA99887766554433221104", - "000102030405060708090A0B0C0D0E0F", - "000102030405060708090A0B0C0D0E0F", - "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, - {"BBAA99887766554433221105", - "000102030405060708090A0B0C0D0E0F", - "", - "8CF761B6902EF764462AD86498CA6B97"}, - {"BBAA99887766554433221106", - "", - "000102030405060708090A0B0C0D0E0F", - "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, - {"BBAA99887766554433221107", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, - {"BBAA99887766554433221108", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "", - "6DC225A071FC1B9F7C69F93B0F1E10DE"}, - {"BBAA99887766554433221109", - "", - "000102030405060708090A0B0C0D0E0F1011121314151617", - "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, - {"BBAA9988776655443322110A", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, - {"BBAA9988776655443322110B", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "", - "FE80690BEE8A485D11F32965BC9D2A32"}, - {"BBAA9988776655443322110C", - "", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", - "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, - {"BBAA9988776655443322110D", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, - {"BBAA9988776655443322110E", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "", - "C5CD9D1850C141E358649994EE701B68"}, - {"BBAA9988776655443322110F", - "", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go deleted file mode 100644 index 5dc158f0128..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go +++ /dev/null @@ -1,24 +0,0 @@ -package ocb - -// Second set of test vectors from https://tools.ietf.org/html/rfc7253 -var rfc7253TestVectorTaglen96 = struct { - key, nonce, header, plaintext, ciphertext string -}{"0F0E0D0C0B0A09080706050403020100", - "BBAA9988776655443322110D", - 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", - "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} - -var rfc7253AlgorithmTest = []struct { - KEYLEN, TAGLEN int - OUTPUT string }{ - {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, - {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, - {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, - {128, 96, "77A3D8E73589158D25D01209"}, - {192, 96, "05D56EAD2752C86BE6932C5E"}, - {256, 96, "5458359AC23B0CBA9E6330DD"}, - {128, 64, "192C9B7BD90BA06A"}, - {192, 64, "0066BC6E0EF34E24"}, - {256, 64, "7D4EA5D445501CBE"}, - } diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go deleted file mode 100644 index 3c6251d1ce6..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 Matthew Endsley -// All rights reserved -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted providing that the following conditions -// are met: -// 1. Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -// POSSIBILITY OF SUCH DAMAGE. - -// Package keywrap is an implementation of the RFC 3394 AES key wrapping -// algorithm. This is used in OpenPGP with elliptic curve keys. -package keywrap - -import ( - "crypto/aes" - "encoding/binary" - "errors" -) - -var ( - // ErrWrapPlaintext is returned if the plaintext is not a multiple - // of 64 bits. - ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") - - // ErrUnwrapCiphertext is returned if the ciphertext is not a - // multiple of 64 bits. - ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") - - // ErrUnwrapFailed is returned if unwrapping a key fails. - ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") - - // NB: the AES NewCipher call only fails if the key is an invalid length. - - // ErrInvalidKey is returned when the AES key is invalid. - ErrInvalidKey = errors.New("keywrap: invalid AES key") -) - -// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
-func Wrap(key, plainText []byte) ([]byte, error) { - if len(plainText)%8 != 0 { - return nil, ErrWrapPlaintext - } - - c, err := aes.NewCipher(key) - if err != nil { - return nil, ErrInvalidKey - } - - nblocks := len(plainText) / 8 - - // 1) Initialize variables. - var block [aes.BlockSize]byte - // - Set A = IV, an initial value (see 2.2.3) - for ii := 0; ii < 8; ii++ { - block[ii] = 0xA6 - } - - // - For i = 1 to n - // - Set R[i] = P[i] - intermediate := make([]byte, len(plainText)) - copy(intermediate, plainText) - - // 2) Calculate intermediate values. - for ii := 0; ii < 6; ii++ { - for jj := 0; jj < nblocks; jj++ { - // - B = AES(K, A | R[i]) - copy(block[8:], intermediate[jj*8:jj*8+8]) - c.Encrypt(block[:], block[:]) - - // - A = MSB(64, B) ^ t where t = (n*j)+1 - t := uint64(ii*nblocks + jj + 1) - val := binary.BigEndian.Uint64(block[:8]) ^ t - binary.BigEndian.PutUint64(block[:8], val) - - // - R[i] = LSB(64, B) - copy(intermediate[jj*8:jj*8+8], block[8:]) - } - } - - // 3) Output results. - // - Set C[0] = A - // - For i = 1 to n - // - C[i] = R[i] - return append(block[:8], intermediate...), nil -} - -// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm. -func Unwrap(key, cipherText []byte) ([]byte, error) { - if len(cipherText)%8 != 0 { - return nil, ErrUnwrapCiphertext - } - - c, err := aes.NewCipher(key) - if err != nil { - return nil, ErrInvalidKey - } - - nblocks := len(cipherText)/8 - 1 - - // 1) Initialize variables. - var block [aes.BlockSize]byte - // - Set A = C[0] - copy(block[:8], cipherText[:8]) - - // - For i = 1 to n - // - Set R[i] = C[i] - intermediate := make([]byte, len(cipherText)-8) - copy(intermediate, cipherText[8:]) - - // 2) Compute intermediate values. - for jj := 5; jj >= 0; jj-- { - for ii := nblocks - 1; ii >= 0; ii-- { - // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1 - // - A = MSB(64, B) - t := uint64(jj*nblocks + ii + 1) - val := binary.BigEndian.Uint64(block[:8]) ^ t - binary.BigEndian.PutUint64(block[:8], val) - - copy(block[8:], intermediate[ii*8:ii*8+8]) - c.Decrypt(block[:], block[:]) - - // - R[i] = LSB(B, 64) - copy(intermediate[ii*8:ii*8+8], block[8:]) - } - } - - // 3) Output results. - // - If A is an appropriate initial value (see 2.2.3), - for ii := 0; ii < 8; ii++ { - if block[ii] != 0xA6 { - return nil, ErrUnwrapFailed - } - } - - // - For i = 1 to n - // - P[i] = R[i] - return intermediate, nil -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go deleted file mode 100644 index 3b357e5851b..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. 
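Wrap and Unwrap above are inverses; RFC 3394 prepends the 8-byte integrity block, so wrapped output is always 8 bytes longer than the input. A sketch with an all-zero demo KEK:

package main

import (
	"bytes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
)

func main() {
	kek := make([]byte, 16) // demo key-encryption key (AES-128); random in practice
	cek := make([]byte, 24) // key material to wrap; must be a multiple of 8 bytes

	wrapped, err := keywrap.Wrap(kek, cek)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(wrapped) == len(cek)+8) // true: the IV block is prepended

	unwrapped, err := keywrap.Unwrap(kek, wrapped)
	if err != nil {
		panic(err) // ErrUnwrapFailed if the ciphertext was tampered with
	}
	fmt.Println(bytes.Equal(unwrapped, cek)) // true
}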
-// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. -type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 - crcSet bool -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if bytes.HasPrefix(line, armorEnd) { - l.eof = true - return 0, io.EOF - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - l.crcSet = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582c37c..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go deleted file mode 100644 index a94f6150c4b..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "hash" - "io" -) - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { - start := 0 - for i, c := range buf { - switch *s { - case 0: - if c == '\r' { - *s = 1 - } else if c == '\n' { - cw.Write(buf[start:i]) - cw.Write(newline) - start = i + 1 - } - case 1: - *s = 0 - } - } - - cw.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - return writeCanonical(cth.h, buf, &cth.s) -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go deleted file mode 100644 index 0b49be4bf37..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. 
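Encode above mirrors Decode from the previous hunk: closing the writer emits the CRC24 checksum line and the END trailer, and the decoder verifies that checksum when its body reader reaches EOF. A round-trip sketch with arbitrary demo values for the block type and header:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

func main() {
	var buf bytes.Buffer
	w, err := armor.Encode(&buf, "PGP MESSAGE", map[string]string{"Comment": "demo"})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello armor")); err != nil {
		panic(err)
	}
	w.Close() // flushes base64, then writes '=<crc>' and the END line

	blk, err := armor.Decode(&buf)
	if err != nil {
		panic(err)
	}
	body, err := io.ReadAll(blk.Body) // the CRC24 is checked at EOF
	if err != nil {
		panic(err)
	}
	fmt.Println(blk.Type, string(body)) // PGP MESSAGE hello armor
}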
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ecdh implements ECDH encryption, suitable for OpenPGP, -// as specified in RFC 6637, section 8. -package ecdh - -import ( - "bytes" - "crypto/elliptic" - "errors" - "io" - "math/big" - - "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" -) - -type KDF struct { - Hash algorithm.Hash - Cipher algorithm.Cipher -} - -type PublicKey struct { - ecc.CurveType - elliptic.Curve - X, Y *big.Int - KDF -} - -type PrivateKey struct { - PublicKey - D []byte -} - -func GenerateKey(c elliptic.Curve, kdf KDF, rand io.Reader) (priv *PrivateKey, err error) { - priv = new(PrivateKey) - priv.PublicKey.Curve = c - priv.PublicKey.KDF = kdf - priv.D, priv.PublicKey.X, priv.PublicKey.Y, err = elliptic.GenerateKey(c, rand) - return -} - -func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { - if len(msg) > 40 { - return nil, nil, errors.New("ecdh: message too long") - } - // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, - // AES-192, and AES-256, respectively, to provide the same number of - // octets, 40 total, as an input to the key wrapping method. - padding := make([]byte, 40-len(msg)) - for i := range padding { - padding[i] = byte(40 - len(msg)) - } - m := append(msg, padding...) - - if pub.CurveType == ecc.Curve25519 { - return X25519Encrypt(random, pub, m, curveOID, fingerprint) - } - - d, x, y, err := elliptic.GenerateKey(pub.Curve, random) - if err != nil { - return nil, nil, err - } - - vsG = elliptic.Marshal(pub.Curve, x, y) - zbBig, _ := pub.Curve.ScalarMult(pub.X, pub.Y, d) - - byteLen := (pub.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) - - z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) - if err != nil { - return nil, nil, err - } - - if c, err = keywrap.Wrap(z, m); err != nil { - return nil, nil, err - } - - return vsG, c, nil - -} - -func Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { - if priv.PublicKey.CurveType == ecc.Curve25519 { - return X25519Decrypt(priv, vsG, m, curveOID, fingerprint) - } - x, y := elliptic.Unmarshal(priv.Curve, vsG) - zbBig, _ := priv.Curve.ScalarMult(x, y, priv.D) - - byteLen := (priv.Curve.Params().BitSize + 7) >> 3 - zb := make([]byte, byteLen) - zbBytes := zbBig.Bytes() - copy(zb[byteLen-len(zbBytes):], zbBytes) - - z, err := buildKey(&priv.PublicKey, zb, curveOID, fingerprint, false, false) - if err != nil { - return nil, err - } - - c, err := keywrap.Unwrap(z, m) - if err != nil { - return nil, err - } - - return c[:len(c)-int(c[len(c)-1])], nil -} - -func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { - // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 - // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap - // || "Anonymous Sender " || recipient_fingerprint; - param := new(bytes.Buffer) - if _, err := param.Write(curveOID); err != nil { - return nil, err - } - algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} - if _, err := param.Write(algKDF); err != nil { - return nil, err - } - if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { - return nil, err - } - // For v5 keys, the 20 leftmost 
octets of the fingerprint are used. - if _, err := param.Write(fingerprint[:20]); err != nil { - return nil, err - } - if param.Len() - len(curveOID) != 45 { - return nil, errors.New("ecdh: malformed KDF Param") - } - - // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param ); - h := pub.KDF.Hash.New() - if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil { - return nil, err - } - zbLen := len(zb) - i := 0 - j := zbLen - 1 - if stripLeading { - // Work around old go crypto bug where the leading zeros are missing. - for ; i < zbLen && zb[i] == 0; i++ {} - } - if stripTrailing { - // Work around old OpenPGP.js bug where insignificant trailing zeros in - // this little-endian number are missing. - // (See https://github.com/openpgpjs/openpgpjs/pull/853.) - for ; j >= 0 && zb[j] == 0; j-- {} - } - if _, err := h.Write(zb[i:j+1]); err != nil { - return nil, err - } - if _, err := h.Write(param.Bytes()); err != nil { - return nil, err - } - mb := h.Sum(nil) - - return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB. - -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go deleted file mode 100644 index 15b41a31dff..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ecdh implements ECDH encryption, suitable for OpenPGP, -// as specified in RFC 6637, section 8. -package ecdh - -import ( - "errors" - "io" - "math/big" - - "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "golang.org/x/crypto/curve25519" -) - -// Generates a private-public key-pair. -// 'priv' is a private key; a scalar belonging to the set -// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the -// curve. 'pub' is simply 'priv' * G where G is the base point. -// See https://cr.yp.to/ecdh.html and RFC7748, sec 5. -func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) { - var n, helper = new(big.Int), new(big.Int) - n.SetUint64(1) - n.Lsh(n, 252) - helper.SetString("27742317777372353535851937790883648493", 10) - n.Add(n, helper) - - for true { - _, err = io.ReadFull(rand, priv[:]) - if err != nil { - return - } - // The following ensures that the private key is a number of the form - // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of - // of the curve. - priv[0] &= 248 - priv[31] &= 127 - priv[31] |= 64 - - // If the scalar is out of range, sample another random number. - if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 { - continue - } - - curve25519.ScalarBaseMult(&pub, &priv) - return - } - return -} - -// X25519GenerateKey samples the key pair according to the correct distribution. -// It also sets the given key-derivation function and returns the *PrivateKey -// object along with an error. 
-func X25519GenerateKey(rand io.Reader, kdf KDF) (priv *PrivateKey, err error) { - ci := ecc.FindByName("Curve25519") - priv = new(PrivateKey) - priv.PublicKey.Curve = ci.Curve - d, pubKey, err := x25519GenerateKeyPairBytes(rand) - if err != nil { - return nil, err - } - priv.PublicKey.KDF = kdf - priv.D = make([]byte, 32) - copyReversed(priv.D, d[:]) - priv.PublicKey.CurveType = ci.CurveType - priv.PublicKey.Curve = ci.Curve - /* - * Note that ECPoint.point differs from the definition of public keys in - * [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is - * more uniform with how big integers are represented in TLS, and (2) there - * is an additional length byte (so ECpoint.point is actually 33 bytes), - * again for uniformity (and extensibility). - */ - var encodedKey = make([]byte, 33) - encodedKey[0] = 0x40 - copy(encodedKey[1:], pubKey[:]) - priv.PublicKey.X = new(big.Int).SetBytes(encodedKey[:]) - priv.PublicKey.Y = new(big.Int) - return priv, nil -} - -func X25519Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { - d, ephemeralKey, err := x25519GenerateKeyPairBytes(random) - if err != nil { - return nil, nil, err - } - var pubKey [32]byte - - if pub.X.BitLen() > 33*264 { - return nil, nil, errors.New("ecdh: invalid key") - } - copy(pubKey[:], pub.X.Bytes()[1:]) - - var zb [32]byte - curve25519.ScalarBaseMult(&zb, &d) - curve25519.ScalarMult(&zb, &d, &pubKey) - z, err := buildKey(pub, zb[:], curveOID, fingerprint, false, false) - - if err != nil { - return nil, nil, err - } - - if c, err = keywrap.Wrap(z, msg); err != nil { - return nil, nil, err - } - - var vsg [33]byte - vsg[0] = 0x40 - copy(vsg[1:], ephemeralKey[:]) - - return vsg[:], c, nil -} - -func X25519Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { - var zb, d, ephemeralKey [32]byte - if len(vsG) != 33 || vsG[0] != 0x40 { - return nil, errors.New("ecdh: invalid key") - } - copy(ephemeralKey[:], vsG[1:33]) - - copyReversed(d[:], priv.D) - curve25519.ScalarBaseMult(&zb, &d) - curve25519.ScalarMult(&zb, &d, &ephemeralKey) - - var c []byte - - for i := 0; i < 3; i++ { - // Try buildKey three times for compat, see comments in buildKey. - z, err := buildKey(&priv.PublicKey, zb[:], curveOID, fingerprint, i == 1, i == 2) - if err != nil { - return nil, err - } - - res, err := keywrap.Unwrap(z, m) - if i == 2 && err != nil { - // Only return an error after we've tried all variants of buildKey. - return nil, err - } - - c = res - if err == nil { - break - } - } - - return c[:len(c)-int(c[len(c)-1])], nil -} - -func copyReversed(out []byte, in []byte) { - l := len(in) - for i := 0; i < l; i++ { - out[i] = in[l-i-1] - } -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 6a07d8ff279..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. 
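The X25519 flow above composes with buildKey from the previous file: Encrypt pads the session key to 40 bytes, derives a key-encryption key via the KDF, and AES-key-wraps the result. Two caveats for the sketch below: the algorithm package is internal, so this only compiles from within the go-crypto module, and curveOID/fingerprint are stand-ins for values normally read from the recipient's key packet. (Per RFC 6637 the "Anonymous Sender" constant in buildKey is 20 octets including trailing spaces, which is what makes the 45-byte length check add up; the spaces may have been collapsed in the hunk above.)

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/ecdh"
	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)

func main() {
	kdf := ecdh.KDF{Hash: algorithm.SHA256, Cipher: algorithm.AES128}
	priv, err := ecdh.X25519GenerateKey(rand.Reader, kdf)
	if err != nil {
		panic(err)
	}

	// Stand-ins: the Curve25519 OID and a zeroed 20-byte fingerprint.
	curveOID := []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}
	fingerprint := make([]byte, 20)

	sessionKey := []byte("0123456789abcdef") // at most 40 bytes before padding
	vsG, wrapped, err := ecdh.Encrypt(rand.Reader, &priv.PublicKey, sessionKey, curveOID, fingerprint)
	if err != nil {
		panic(err)
	}

	got, err := ecdh.Decrypt(priv, vsG, wrapped, curveOID, fingerprint)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(got, sessionKey)) // true
}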
-// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - if s.ModInverse(s, priv.P) == nil { - return nil, errors.New("elgamal: invalid private key") - } - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. 
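Encrypt and Decrypt above expect the caller to supply the group parameters; the package offers no key generation. A sketch, assuming the well-known 1024-bit MODP prime from RFC 2409 (generator 2) purely for illustration — as the package comment says, ElGamal should not be chosen for new designs:

package main

import (
	"crypto/rand"
	"fmt"
	"math/big"

	"github.com/ProtonMail/go-crypto/openpgp/elgamal"
)

// 1024-bit MODP prime from RFC 2409 (Oakley Group 2); demo use only.
const primeHex = "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" +
	"8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F1437" +
	"4FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB" +
	"5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF"

func main() {
	p, _ := new(big.Int).SetString(primeHex, 16)
	g := big.NewInt(2)

	x, err := rand.Int(rand.Reader, new(big.Int).Sub(p, big.NewInt(1))) // private exponent
	if err != nil {
		panic(err)
	}
	priv := &elgamal.PrivateKey{
		PublicKey: elgamal.PublicKey{G: g, P: p, Y: new(big.Int).Exp(g, x, p)},
		X:         x,
	}

	c1, c2, err := elgamal.Encrypt(rand.Reader, &priv.PublicKey, []byte("top secret"))
	if err != nil {
		panic(err) // the message must fit in pLen-11 bytes
	}
	msg, err := elgamal.Decrypt(priv, c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(msg)) // top secret
}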
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go deleted file mode 100644 index 17e2bcfed20..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") -var ErrMDCMissing error = SignatureError("MDC packet not found") - -type signatureExpiredError int - -func (se signatureExpiredError) Error() string { - return "openpgp: signature expired" -} - -var ErrSignatureExpired error = signatureExpiredError(0) - -type keyExpiredError int - -func (ke keyExpiredError) Error() string { - return "openpgp: key expired" -} - -var ErrKeyExpired error = keyExpiredError(0) - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -// KeyInvalidError indicates that the public key parameters are invalid -// as they do not match the private ones -type KeyInvalidError string - -func (e KeyInvalidError) Error() string { - return "openpgp: invalid key: " + string(e) -} - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} - -// AEADError indicates that there is a problem when initializing or using a -// AEAD instance, configuration struct, nonces or index values. 
-type AEADError string - -func (ae AEADError) Error() string { - return "openpgp: aead error: " + string(ae) -} - -// ErrDummyPrivateKey results when operations are attempted on a private key -// that is just a dummy key. See -// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 -type ErrDummyPrivateKey string - -func (dke ErrDummyPrivateKey) Error() string { - return "openpgp: s2k GNU dummy key: " + string(dke) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go deleted file mode 100644 index 17a1bfe9cb5..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -package algorithm - -import ( - "crypto/cipher" - "github.com/ProtonMail/go-crypto/eax" - "github.com/ProtonMail/go-crypto/ocb" -) - -// AEADMode defines the Authenticated Encryption with Associated Data mode of -// operation. -type AEADMode uint8 - -// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) -const ( - AEADModeEAX = AEADMode(1) - AEADModeOCB = AEADMode(2) - AEADModeGCM = AEADMode(100) -) - -// TagLength returns the length in bytes of authentication tags. -func (mode AEADMode) TagLength() int { - switch mode { - case AEADModeEAX: - return 16 - case AEADModeOCB: - return 16 - case AEADModeGCM: - return 16 - default: - return 0 - } -} - -// NonceLength returns the length in bytes of nonces. -func (mode AEADMode) NonceLength() int { - switch mode { - case AEADModeEAX: - return 16 - case AEADModeOCB: - return 15 - case AEADModeGCM: - return 12 - default: - return 0 - } -} - -// New returns a fresh instance of the given mode -func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { - var err error - switch mode { - case AEADModeEAX: - alg, err = eax.NewEAX(block) - case AEADModeOCB: - alg, err = ocb.NewOCB(block) - case AEADModeGCM: - alg, err = cipher.NewGCM(block) - } - if err != nil { - panic(err.Error()) - } - return alg -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go deleted file mode 100644 index 5760cff80ea..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package algorithm - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - - "golang.org/x/crypto/cast5" -) - -// Cipher is an official symmetric key cipher algorithm. See RFC 4880, -// section 9.2. -type Cipher interface { - // Id returns the algorithm ID, as a byte, of the cipher. - Id() uint8 - // KeySize returns the key size, in bytes, of the cipher. - KeySize() int - // BlockSize returns the block size, in bytes, of the cipher. - BlockSize() int - // New returns a fresh instance of the given cipher. - New(key []byte) cipher.Block -} - -// The following constants mirror the OpenPGP standard (RFC 4880). 
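AEADMode above is a small factory tying RFC 4880bis mode IDs to EAX, OCB, and GCM; TagLength and NonceLength describe what New hands back. A sketch (again an internal package, so module-internal only):

package main

import (
	"crypto/aes"
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)

func main() {
	block, err := aes.NewCipher(make([]byte, 32)) // demo AES-256 key
	if err != nil {
		panic(err)
	}
	mode := algorithm.AEADModeOCB
	aead := mode.New(block) // note: panics instead of returning an error

	nonce := make([]byte, mode.NonceLength()) // 15 bytes for OCB
	sealed := aead.Seal(nil, nonce, []byte("payload"), nil)
	fmt.Println(len(sealed) == len("payload")+mode.TagLength()) // true (16-byte tag)
}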
-const ( - TripleDES = CipherFunction(2) - CAST5 = CipherFunction(3) - AES128 = CipherFunction(7) - AES192 = CipherFunction(8) - AES256 = CipherFunction(9) -) - -// CipherById represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -var CipherById = map[uint8]Cipher{ - TripleDES.Id(): TripleDES, - CAST5.Id(): CAST5, - AES128.Id(): AES128, - AES192.Id(): AES192, - AES256.Id(): AES256, -} - -type CipherFunction uint8 - -// ID returns the algorithm Id, as a byte, of cipher. -func (sk CipherFunction) Id() uint8 { - return uint8(sk) -} - -var keySizeByID = map[uint8]int{ - TripleDES.Id(): 24, - CAST5.Id(): cast5.KeySize, - AES128.Id(): 16, - AES192.Id(): 24, - AES256.Id(): 32, -} - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case TripleDES: - return 24 - case CAST5: - return cast5.KeySize - case AES128: - return 16 - case AES192: - return 24 - case AES256: - return 32 - } - return 0 -} - -// BlockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) BlockSize() int { - switch cipher { - case TripleDES: - return des.BlockSize - case CAST5: - return 8 - case AES128, AES192, AES256: - return 16 - } - return 0 -} - -// New returns a fresh instance of the given cipher. -func (cipher CipherFunction) New(key []byte) (block cipher.Block) { - var err error - switch cipher { - case TripleDES: - block, err = des.NewTripleDESCipher(key) - case CAST5: - block, err = cast5.NewCipher(key) - case AES128, AES192, AES256: - block, err = aes.NewCipher(key) - } - if err != nil { - panic(err.Error()) - } - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go deleted file mode 100644 index 3f1b61b88e6..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package algorithm - -import ( - "crypto" - "fmt" - "hash" -) - -// Hash is an official hash function algorithm. See RFC 4880, section 9.4. -type Hash interface { - // Id returns the algorithm ID, as a byte, of Hash. - Id() uint8 - // Available reports whether the given hash function is linked into the binary. - Available() bool - // HashFunc simply returns the value of h so that Hash implements SignerOpts. - HashFunc() crypto.Hash - // New returns a new hash.Hash calculating the given hash function. New - // panics if the hash function is not linked into the binary. - New() hash.Hash - // Size returns the length, in bytes, of a digest resulting from the given - // hash function. It doesn't require that the hash function in question be - // linked into the program. - Size() int - // String is the name of the hash function corresponding to the given - // OpenPGP hash id. - String() string -} - -// The following vars mirror the crypto/Hash supported hash functions. 
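CipherFunction maps OpenPGP wire IDs onto concrete stdlib and x/crypto ciphers, and CipherById provides the reverse lookup. A module-internal sketch:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
)

func main() {
	cf := algorithm.AES256
	key := make([]byte, cf.KeySize()) // 32 bytes for AES-256
	block := cf.New(key)              // panics on a bad key length

	fmt.Println(cf.Id(), cf.KeySize(), cf.BlockSize(), block.BlockSize()) // 9 32 16 16

	if c, ok := algorithm.CipherById[cf.Id()]; ok {
		fmt.Println(c.KeySize()) // 32: the registry round-trips to the same algorithm
	}
}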
-var ( - MD5 Hash = cryptoHash{1, crypto.MD5} - SHA1 Hash = cryptoHash{2, crypto.SHA1} - RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160} - SHA256 Hash = cryptoHash{8, crypto.SHA256} - SHA384 Hash = cryptoHash{9, crypto.SHA384} - SHA512 Hash = cryptoHash{10, crypto.SHA512} - SHA224 Hash = cryptoHash{11, crypto.SHA224} -) - -// HashById represents the different hash functions specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 -var ( - HashById = map[uint8]Hash{ - MD5.Id(): MD5, - SHA1.Id(): SHA1, - RIPEMD160.Id(): RIPEMD160, - SHA256.Id(): SHA256, - SHA384.Id(): SHA384, - SHA512.Id(): SHA512, - SHA224.Id(): SHA224, - } -) - -// cryptoHash contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -type cryptoHash struct { - id uint8 - crypto.Hash -} - -// Id returns the algorithm ID, as a byte, of cryptoHash. -func (h cryptoHash) Id() uint8 { - return h.id -} - -var hashNames = map[uint8]string{ - MD5.Id(): "MD5", - SHA1.Id(): "SHA1", - RIPEMD160.Id(): "RIPEMD160", - SHA256.Id(): "SHA256", - SHA384.Id(): "SHA384", - SHA512.Id(): "SHA512", - SHA224.Id(): "SHA224", -} - -func (h cryptoHash) String() string { - s, ok := hashNames[h.id] - if !ok { - panic(fmt.Sprintf("Unsupported hash function %d", h.id)) - } - return s -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go deleted file mode 100644 index f91042fd85e..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go +++ /dev/null @@ -1,118 +0,0 @@ -package ecc - -import ( - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "crypto/elliptic" - "bytes" - "github.com/ProtonMail/go-crypto/bitcurves" - "github.com/ProtonMail/go-crypto/brainpool" -) - -type SignatureAlgorithm uint8 - -const ( - ECDSA SignatureAlgorithm = 1 - EdDSA SignatureAlgorithm = 2 -) - -type CurveInfo struct { - Name string - Oid *encoding.OID - Curve elliptic.Curve - SigAlgorithm SignatureAlgorithm - CurveType CurveType -} - -var curves = []CurveInfo{ - { - Name: "NIST curve P-256", - Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), - Curve: elliptic.P256(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "NIST curve P-384", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), - Curve: elliptic.P384(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "NIST curve P-521", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), - Curve: elliptic.P521(), - CurveType: NISTCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "SecP256k1", - Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), - Curve: bitcurves.S256(), - CurveType: BitCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "Curve25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), - Curve: elliptic.P256(),// filler - CurveType: Curve25519, - SigAlgorithm: ECDSA, - }, - { - Name: "Ed25519", - Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), - Curve: elliptic.P256(), // filler - CurveType: NISTCurve, - SigAlgorithm: EdDSA, - }, - { - Name: "Brainpool P256r1", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), - Curve: brainpool.P256r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "BrainpoolP384r1", - Oid: 
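Editor's note: hash.go above pairs OpenPGP hash IDs with Go's crypto.Hash values, and New panics unless the function is linked into the binary. A standalone sketch of the lookup for ID 8 (SHA-256); openPGPHashIDs here is a hypothetical subset of the HashById map above:

package main

import (
    "crypto"
    _ "crypto/sha256" // links SHA-256 into the binary so Available() is true
    "fmt"
)

// openPGPHashIDs mirrors part of the HashById table above.
var openPGPHashIDs = map[uint8]crypto.Hash{
    8:  crypto.SHA256,
    9:  crypto.SHA384,
    10: crypto.SHA512,
    11: crypto.SHA224,
}

func main() {
    h, ok := openPGPHashIDs[8]
    if !ok || !h.Available() {
        panic("unsupported hash id")
    }
    d := h.New()
    d.Write([]byte("data"))
    fmt.Printf("%d-byte digest: %x\n", h.Size(), d.Sum(nil))
}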
encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), - Curve: brainpool.P384r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, - { - Name: "BrainpoolP512r1", - Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), - Curve: brainpool.P512r1(), - CurveType: BrainpoolCurve, - SigAlgorithm: ECDSA, - }, -} - -func FindByCurve(curve elliptic.Curve) *CurveInfo { - for _, curveInfo := range curves { - if curveInfo.Curve == curve { - return &curveInfo - } - } - return nil -} - -func FindByOid(oid encoding.Field) *CurveInfo { - var rawBytes = oid.Bytes() - for _, curveInfo := range curves { - if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { - return &curveInfo - } - } - return nil -} - -func FindByName(name string) *CurveInfo { - for _, curveInfo := range curves { - if curveInfo.Name == name { - return &curveInfo - } - } - return nil -} \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go deleted file mode 100644 index de8bca0ac23..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go +++ /dev/null @@ -1,10 +0,0 @@ -package ecc - -type CurveType uint8 - -const ( - NISTCurve CurveType = 1 - Curve25519 CurveType = 2 - BitCurve CurveType = 3 - BrainpoolCurve CurveType = 4 -) \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go deleted file mode 100644 index 6c921481b7b..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package encoding implements openpgp packet field encodings as specified in -// RFC 4880 and 6637. -package encoding - -import "io" - -// Field is an encoded field of an openpgp packet. -type Field interface { - // Bytes returns the decoded data. - Bytes() []byte - - // BitLength is the size in bits of the decoded data. - BitLength() uint16 - - // EncodedBytes returns the encoded data. - EncodedBytes() []byte - - // EncodedLength is the size in bytes of the encoded data. - EncodedLength() uint16 - - // ReadFrom reads the next Field from r. - ReadFrom(r io.Reader) (int64, error) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go deleted file mode 100644 index 02e5e695c38..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package encoding - -import ( - "io" - "math/big" - "math/bits" -) - -// An MPI is used to store the contents of a big integer, along with the bit -// length that was specified in the original input. This allows the MPI to be -// reserialized exactly. -type MPI struct { - bytes []byte - bitLength uint16 -} - -// NewMPI returns a MPI initialized with bytes. 
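Editor's note: FindByOid above is a linear scan over the curve table comparing raw OID bytes. A pared-down standalone version; curveEntry and findByOid are illustrative names, and the two table rows are copied from the deleted curveInfo.go:

package main

import (
    "bytes"
    "fmt"
)

type curveEntry struct {
    name string
    oid  []byte
}

// Two rows copied from the deleted curveInfo.go table.
var curves = []curveEntry{
    {"NIST curve P-256", []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}},
    {"Ed25519", []byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}},
}

func findByOid(oid []byte) *curveEntry {
    for i := range curves {
        if bytes.Equal(curves[i].oid, oid) {
            return &curves[i]
        }
    }
    return nil
}

func main() {
    fmt.Println(findByOid([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}).name) // Ed25519
}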
-func NewMPI(bytes []byte) *MPI { - for len(bytes) != 0 && bytes[0] == 0 { - bytes = bytes[1:] - } - if len(bytes) == 0 { - bitLength := uint16(0) - return &MPI{bytes, bitLength} - } - bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) - return &MPI{bytes, bitLength} -} - -// Bytes returns the decoded data. -func (m *MPI) Bytes() []byte { - return m.bytes -} - -// BitLength is the size in bits of the decoded data. -func (m *MPI) BitLength() uint16 { - return m.bitLength -} - -// EncodedBytes returns the encoded data. -func (m *MPI) EncodedBytes() []byte { - return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...) -} - -// EncodedLength is the size in bytes of the encoded data. -func (m *MPI) EncodedLength() uint16 { - return uint16(2 + len(m.bytes)) -} - -// ReadFrom reads into m the next MPI from r. -func (m *MPI) ReadFrom(r io.Reader) (int64, error) { - var buf [2]byte - n, err := io.ReadFull(r, buf[0:]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return int64(n), err - } - - m.bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - m.bytes = make([]byte, (int(m.bitLength)+7)/8) - - nn, err := io.ReadFull(r, m.bytes) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - - // remove leading zero bytes from malformed GnuPG encoded MPIs: - // https://bugs.gnupg.org/gnupg/issue1853 - // for _, b := range m.bytes { - // if b != 0 { - // break - // } - // m.bytes = m.bytes[1:] - // m.bitLength -= 8 - // } - - return int64(n) + int64(nn), err -} - -// SetBig initializes m with the bits from n. -func (m *MPI) SetBig(n *big.Int) *MPI { - m.bytes = n.Bytes() - m.bitLength = uint16(n.BitLen()) - return m -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go deleted file mode 100644 index ee39fd6bbb1..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package encoding - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// OID is used to store a variable-length field with a one-octet size -// prefix. See https://tools.ietf.org/html/rfc6637#section-9. -type OID struct { - bytes []byte -} - -const ( - // maxOID is the maximum number of bytes in a OID. - maxOID = 254 - // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC - // specifies are reserved. - reservedOIDLength1 = 0 - reservedOIDLength2 = 0xff -) - -// NewOID returns a OID initialized with bytes. -func NewOID(bytes []byte) *OID { - switch len(bytes) { - case reservedOIDLength1, reservedOIDLength2: - panic("encoding: NewOID argument length is reserved") - default: - if len(bytes) > maxOID { - panic("encoding: NewOID argment too large") - } - } - - return &OID{ - bytes: bytes, - } -} - -// Bytes returns the decoded data. -func (o *OID) Bytes() []byte { - return o.bytes -} - -// BitLength is the size in bits of the decoded data. -func (o *OID) BitLength() uint16 { - return uint16(len(o.bytes) * 8) -} - -// EncodedBytes returns the encoded data. -func (o *OID) EncodedBytes() []byte { - return append([]byte{byte(len(o.bytes))}, o.bytes...) -} - -// EncodedLength is the size in bytes of the encoded data. 
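Editor's note: the MPI wire format above is a two-byte big-endian bit count followed by the magnitude bytes, with leading zero bytes stripped and the bit length derived from the first remaining byte. A standalone re-implementation of the NewMPI/EncodedBytes pair (encodeMPI is an illustrative name):

package main

import (
    "fmt"
    "math/big"
    "math/bits"
)

// encodeMPI mirrors NewMPI + EncodedBytes from the deleted mpi.go.
func encodeMPI(n *big.Int) []byte {
    b := n.Bytes() // big-endian magnitude, no leading zeros
    var bitLen uint16
    if len(b) > 0 {
        bitLen = 8*uint16(len(b)-1) + uint16(bits.Len8(b[0]))
    }
    return append([]byte{byte(bitLen >> 8), byte(bitLen)}, b...)
}

func main() {
    // 65537 = 0x010001 needs 17 bits, so the encoding is 00 11 01 00 01.
    fmt.Printf("% x\n", encodeMPI(big.NewInt(65537)))
}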
-func (o *OID) EncodedLength() uint16 { - return uint16(1 + len(o.bytes)) -} - -// ReadFrom reads into b the next OID from r. -func (o *OID) ReadFrom(r io.Reader) (int64, error) { - var buf [1]byte - n, err := io.ReadFull(r, buf[:]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return int64(n), err - } - - switch buf[0] { - case reservedOIDLength1, reservedOIDLength2: - return int64(n), errors.UnsupportedError("reserved for future extensions") - } - - o.bytes = make([]byte, buf[0]) - - nn, err := io.ReadFull(r, o.bytes) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - - return int64(n) + int64(nn), err -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go deleted file mode 100644 index 0d2eb45b8e1..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - goerrors "errors" - "io" - "math/big" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/packet" - "golang.org/x/crypto/ed25519" -) - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. -func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - - // Generate a primary signing key - primaryPrivRaw, err := newSigner(config) - if err != nil { - return nil, err - } - primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) - if config != nil && config.V5Keys { - primary.UpgradeToV5() - } - - isPrimaryId := true - selfSignature := &packet.Signature{ - Version: primary.PublicKey.Version, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: primary.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - IssuerKeyId: &primary.PublicKey.KeyId, - IssuerFingerprint: primary.PublicKey.Fingerprint, - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - MDC: true, // true by default, see 5.8 vs. 5.14 - AEAD: config.AEAD() != nil, - V5Keys: config != nil && config.V5Keys, - } - - // Set the PreferredHash for the SelfSignature from the packet.Config. - // If it is not the must-implement algorithm from rfc4880bis, append that. - selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())} - if config.Hash() != crypto.SHA256 { - selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) - } - - // Likewise for DefaultCipher. 
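Editor's note: the OID wire format read by ReadFrom above is a single length octet (0 and 0xff reserved) followed by that many bytes. A minimal reader under the same rules; readOID is an illustrative name:

package main

import (
    "bytes"
    "fmt"
    "io"
)

// readOID mirrors OID.ReadFrom above: one length byte, then the body.
func readOID(r io.Reader) ([]byte, error) {
    var l [1]byte
    if _, err := io.ReadFull(r, l[:]); err != nil {
        return nil, err
    }
    if l[0] == 0 || l[0] == 0xff {
        return nil, fmt.Errorf("oid: reserved length %d", l[0])
    }
    oid := make([]byte, l[0])
    if _, err := io.ReadFull(r, oid); err != nil {
        return nil, err
    }
    return oid, nil
}

func main() {
    // 8-byte OID for NIST P-256, preceded by its length octet.
    wire := []byte{8, 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
    oid, err := readOID(bytes.NewReader(wire))
    if err != nil {
        panic(err)
    }
    fmt.Printf("% x\n", oid)
}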
- selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} - if config.Cipher() != packet.CipherAES128 { - selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) - } - - // And for DefaultMode. - selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} - if config.AEAD().Mode() != packet.AEADModeEAX { - selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX)) - } - - // User ID binding signature - err = selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) - if err != nil { - return nil, err - } - - // Generate an encryption subkey - subPrivRaw, err := newDecrypter(config) - if err != nil { - return nil, err - } - sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) - sub.IsSubkey = true - sub.PublicKey.IsSubkey = true - if config != nil && config.V5Keys { - sub.UpgradeToV5() - } - - // NOTE: No KeyLifetimeSecs here, but we will not return this subkey in EncryptionKey() - // if the primary/master key has expired. - subKey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - Sig: &packet.Signature{ - Version: primary.PublicKey.Version, - CreationTime: creationTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: primary.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &primary.PublicKey.KeyId, - }, - } - - // Subkey binding signature - err = subKey.Sig.SignKey(subKey.PublicKey, primary, config) - if err != nil { - return nil, err - } - - return &Entity{ - PrimaryKey: &primary.PublicKey, - PrivateKey: primary, - Identities: map[string]*Identity{ - uid.Id: &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: selfSignature, - Signatures: []*packet.Signature{selfSignature}, - }, - }, - Subkeys: []Subkey{subKey}, - }, nil -} - -// AddSigningSubkey adds a signing keypair as a subkey to the Entity. -// If config is nil, sensible defaults will be used. -func (e *Entity) AddSigningSubkey(config *packet.Config) error { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - - subPrivRaw, err := newSigner(config) - if err != nil { - return err - } - sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) - - subkey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - Sig: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagSign: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - EmbeddedSignature: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - SigType: packet.SigTypePrimaryKeyBinding, - PubKeyAlgo: sub.PublicKey.PubKeyAlgo, - Hash: config.Hash(), - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - }, - } - if config != nil && config.V5Keys { - subkey.PublicKey.UpgradeToV5() - } - - err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) - if err != nil { - return err - } - - subkey.PublicKey.IsSubkey = true - subkey.PrivateKey.IsSubkey = true - if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { - return err - } - - e.Subkeys = append(e.Subkeys, subkey) - return nil -} - -// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. -// If config is nil, sensible defaults will be used. 
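Editor's note: each preference list built in NewEntity above leads with the configured algorithm and appends the must-implement one (SHA-256, AES-128, EAX) only when it differs, so a conforming peer always finds a mutually supported choice. The pattern in isolation; preferredWithFallback is an illustrative name:

package main

import "fmt"

// preferredWithFallback mirrors the preference-list construction in NewEntity:
// the configured algorithm first, then the must-implement one if different.
func preferredWithFallback(configured, mustImplement uint8) []uint8 {
    prefs := []uint8{configured}
    if configured != mustImplement {
        prefs = append(prefs, mustImplement)
    }
    return prefs
}

func main() {
    const sha512, sha256 = 10, 8 // OpenPGP hash ids
    fmt.Println(preferredWithFallback(sha512, sha256)) // [10 8]
    fmt.Println(preferredWithFallback(sha256, sha256)) // [8]
}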
-func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { - creationTime := config.Now() - keyLifetimeSecs := config.KeyLifetime() - - subPrivRaw, err := newDecrypter(config) - if err != nil { - return err - } - sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) - - subkey := Subkey{ - PublicKey: &sub.PublicKey, - PrivateKey: sub, - Sig: &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: creationTime, - KeyLifetimeSecs: &keyLifetimeSecs, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - if config != nil && config.V5Keys { - subkey.PublicKey.UpgradeToV5() - } - - subkey.PublicKey.IsSubkey = true - subkey.PrivateKey.IsSubkey = true - if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { - return err - } - - e.Subkeys = append(e.Subkeys, subkey) - return nil -} - -// Generates a signing key -func newSigner(config *packet.Config) (signer crypto.Signer, err error) { - switch config.PublicKeyAlgorithm() { - case packet.PubKeyAlgoRSA: - bits := config.RSAModulusBits() - if bits < 1024 { - return nil, errors.InvalidArgumentError("bits must be >= 1024") - } - if config != nil && len(config.RSAPrimes) >= 2 { - primes := config.RSAPrimes[0:2] - config.RSAPrimes = config.RSAPrimes[2:] - return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) - } - return rsa.GenerateKey(config.Random(), bits) - case packet.PubKeyAlgoEdDSA: - _, priv, err := ed25519.GenerateKey(config.Random()) - if err != nil { - return nil, err - } - return &priv, nil - default: - return nil, errors.InvalidArgumentError("unsupported public key algorithm") - } -} - -// Generates an encryption/decryption key -func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { - switch config.PublicKeyAlgorithm() { - case packet.PubKeyAlgoRSA: - bits := config.RSAModulusBits() - if bits < 1024 { - return nil, errors.InvalidArgumentError("bits must be >= 1024") - } - if config != nil && len(config.RSAPrimes) >= 2 { - primes := config.RSAPrimes[0:2] - config.RSAPrimes = config.RSAPrimes[2:] - return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) - } - return rsa.GenerateKey(config.Random(), bits) - case packet.PubKeyAlgoEdDSA: - fallthrough // When passing EdDSA, we generate an ECDH subkey - case packet.PubKeyAlgoECDH: - var kdf = ecdh.KDF{ - Hash: algorithm.SHA512, - Cipher: algorithm.AES256, - } - return ecdh.X25519GenerateKey(config.Random(), kdf) - default: - return nil, errors.InvalidArgumentError("unsupported public key algorithm") - } -} - -var bigOne = big.NewInt(1) - -// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the -// given bit size, using the given random source and prepopulated primes. -func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { - priv := new(rsa.PrivateKey) - priv.E = 65537 - - if nprimes < 2 { - return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") - } - - if bits < 1024 { - return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") - } - - primes := make([]*big.Int, nprimes) - -NextSetOfPrimes: - for { - todo := bits - // crypto/rand should set the top two bits in each prime. - // Thus each prime has the form - // p_i = 2^bitlen(p_i) × 0.11... (in base 2). 
- // And the product is: - // P = 2^todo × α - // where α is the product of nprimes numbers of the form 0.11... - // - // If α < 1/2 (which can happen for nprimes > 2), we need to - // shift todo to compensate for lost bits: the mean value of 0.11... - // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 - // will give good results. - if nprimes >= 7 { - todo += (nprimes - 2) / 5 - } - for i := 0; i < nprimes; i++ { - var err error - if len(prepopulatedPrimes) == 0 { - primes[i], err = rand.Prime(random, todo/(nprimes-i)) - if err != nil { - return nil, err - } - } else { - primes[i] = prepopulatedPrimes[0] - prepopulatedPrimes = prepopulatedPrimes[1:] - } - - todo -= primes[i].BitLen() - } - - // Make sure that primes is pairwise unequal. - for i, prime := range primes { - for j := 0; j < i; j++ { - if prime.Cmp(primes[j]) == 0 { - continue NextSetOfPrimes - } - } - } - - n := new(big.Int).Set(bigOne) - totient := new(big.Int).Set(bigOne) - pminus1 := new(big.Int) - for _, prime := range primes { - n.Mul(n, prime) - pminus1.Sub(prime, bigOne) - totient.Mul(totient, pminus1) - } - if n.BitLen() != bits { - // This should never happen for nprimes == 2 because - // crypto/rand should set the top two bits in each prime. - // For nprimes > 2 we hope it does not happen often. - continue NextSetOfPrimes - } - - priv.D = new(big.Int) - e := big.NewInt(int64(priv.E)) - ok := priv.D.ModInverse(e, totient) - - if ok != nil { - priv.Primes = primes - priv.N = n - break - } - } - - priv.Precompute() - return priv, nil -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go deleted file mode 100644 index c3fedf7a250..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go +++ /dev/null @@ -1,707 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - goerrors "errors" - "io" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/armor" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. -type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) " - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. 
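Editor's note: when no primes are prepopulated, newSigner and newDecrypter above fall back to crypto/rsa's generator after enforcing the 1024-bit floor. The equivalent direct call, standard library only:

package main

import (
    "crypto/rand"
    "crypto/rsa"
    "fmt"
)

func main() {
    const bits = 2048 // must be >= 1024 per the checks above
    priv, err := rsa.GenerateKey(rand.Reader, bits)
    if err != nil {
        panic(err)
    }
    // crypto/rand sets the top two bits of each prime, so N has exactly `bits` bits.
    fmt.Println(priv.N.BitLen(), priv.E) // 2048 65537
}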
-type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// PrimaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) PrimaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// EncryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) EncryptionKey(now time.Time) (Key, bool) { - // Fail to find any encryption key if the primary key has expired. - i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { - return Key{}, false - } - - // Iterate the keys to find the newest, unexpired one - candidateSubkey := -1 - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.PublicKey.KeyExpired(subkey.Sig, now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt with, then we can obviously use it. - // Also, check expiry again just to be safe. - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// SigningKey return the best candidate Key for signing a message with this -// Entity. -func (e *Entity) SigningKey(now time.Time) (Key, bool) { - return e.SigningKeyById(now, 0) -} - -// SigningKeyById return the Key for signing a message with this -// Entity and keyID. -func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) { - // Fail to find any signing key if the primary key has expired. 
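Editor's note: EncryptionKey above, and SigningKeyById below it, share one selection loop: among subkeys whose flags, algorithm, and expiry all fit, take the latest binding-signature creation time. The loop in isolation over a simplified stand-in type (subkey and newestUsable are illustrative names):

package main

import (
    "fmt"
    "time"
)

// subkey is a pared-down stand-in for openpgp.Subkey, for illustration only.
type subkey struct {
    name     string
    canUse   bool      // flags valid + usage flag + algorithm capability + not expired
    creation time.Time // binding signature CreationTime
}

// newestUsable mirrors the candidateSubkey/maxTime loop in EncryptionKey.
func newestUsable(subkeys []subkey) (subkey, bool) {
    best := -1
    var maxTime time.Time
    for i, sk := range subkeys {
        if sk.canUse && (maxTime.IsZero() || sk.creation.After(maxTime)) {
            best = i
            maxTime = sk.creation
        }
    }
    if best == -1 {
        return subkey{}, false
    }
    return subkeys[best], true
}

func main() {
    now := time.Now()
    sk, ok := newestUsable([]subkey{
        {"old", true, now.Add(-48 * time.Hour)},
        {"expired", false, now},
        {"new", true, now.Add(-time.Hour)},
    })
    fmt.Println(sk.name, ok) // new true
}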
- i := e.PrimaryIdentity() - primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now) - if primaryKeyExpired { - return Key{}, false - } - - // Iterate the keys to find the newest, unexpired one - candidateSubkey := -1 - var maxTime time.Time - for idx, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.PublicKey.KeyExpired(subkey.Sig, now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) && - (id == 0 || subkey.PrivateKey.KeyId == id) { - candidateSubkey = idx - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. Or, if the primary key is marked as ok to - // sign with, then we can use it. Also, check expiry again just to be safe. - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired && - (id == 0 || e.PrivateKey.KeyId == id) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // No keys with a valid Signing Flag or no keys matched the id passed in - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. 
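Editor's note: KeysByIdUsage above folds the boolean signature flags into a bitmask and keeps a key only if every requested bit is present, i.e. usage&requiredUsage == requiredUsage. In isolation (flag values mirror RFC 4880, section 5.2.3.21):

package main

import "fmt"

// Flag values mirroring packet.KeyFlag* (RFC 4880, section 5.2.3.21).
const (
    KeyFlagCertify               = 1 << 0
    KeyFlagSign                  = 1 << 1
    KeyFlagEncryptCommunications = 1 << 2
    KeyFlagEncryptStorage        = 1 << 3
)

func main() {
    var usage byte = KeyFlagSign | KeyFlagCertify
    var required byte = KeyFlagSign | KeyFlagEncryptCommunications
    // The key is skipped unless it carries every required flag.
    fmt.Println(usage&required == required) // false: encryption flag missing
}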
-func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - if err := addUserID(e, packets, pkt); err != nil { - return nil, err - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). 
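Editor's note: a typical caller of the API deleted here reads an armored keyring and walks the resulting entities. A sketch assuming the upstream github.com/ProtonMail/go-crypto/openpgp module is importable and a real armored key replaces the elided block:

package main

import (
    "fmt"
    "strings"

    "github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
    const armored = `-----BEGIN PGP PUBLIC KEY BLOCK-----
...
-----END PGP PUBLIC KEY BLOCK-----` // elided; any armored public key works

    entities, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armored))
    if err != nil {
        panic(err) // fails at runtime until a real key is supplied
    }
    for _, e := range entities {
        fmt.Printf("primary key id %X, %d subkey(s)\n", e.PrimaryKey.KeyId, len(e.Subkeys))
    }
}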
- } - // Else, ignoring the signature as it does not follow anything - // we would know to attach it to. - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { - // Make a new Identity object, that we might wind up throwing away. - // We'll only add it if we get a valid self-signature over this - // userID. - identity := new(Identity) - identity.Name = pkt.Id - identity.UserId = pkt - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return err - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { - identity.SelfSignature = sig - } - identity.Signatures = append(identity.Signatures, sig) - e.Identities[pkt.Id] = identity - } else { - identity.Signatures = append(identity.Signatures, sig) - } - } - - return nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - sig, ok := p.(*packet.Signature) - if !ok { - packets.Unread(p) - break - } - - if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - - if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - - switch sig.SigType { - case packet.SigTypeSubkeyRevocation: - subKey.Sig = sig - case packet.SigTypeSubkeyBinding: - if shouldReplaceSubkeySig(subKey.Sig, sig) { - subKey.Sig = sig - } - } - } - - if subKey.Sig == nil { - return errors.StructuralError("subkey packet not followed by signature") - } - - e.Subkeys = append(e.Subkeys, subKey) - - return nil -} - -func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { - if potentialNewSig == nil { - return false - } - - if existingSig == nil { - return true - } - - if existingSig.SigType == packet.SigTypeSubkeyRevocation { - return false // never override a revocation signature - } - - 
return potentialNewSig.CreationTime.After(existingSig.CreationTime) -} - -// SerializePrivate serializes an Entity, including private key material, but -// excluding signatures from other entities, to the given Writer. -// Identities and subkeys are re-signed in case they changed since NewEntry. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - if e.PrivateKey.Dummy() { - return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") - } - return e.serializePrivate(w, config, true) -} - -// SerializePrivateWithoutSigning serializes an Entity, including private key -// material, but excluding signatures from other entities, to the given Writer. -// Self-signatures of identities and subkeys are not re-signed. This is useful -// when serializing GNU dummy keys, among other things. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { - return e.serializePrivate(w, config, false) -} - -func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { - if e.PrivateKey == nil { - return goerrors.New("openpgp: private key is missing") - } - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - if reSign { - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - if reSign { - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - if subkey.Sig.EmbeddedSignature != nil { - err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, - subkey.PrivateKey, config) - if err != nil { - return - } - } - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w, including -// signatures from other entities. No private key material will be output. -func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. 
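Editor's note: Serialize above writes the public packets in binary; wrapping the writer with armor.Encode and the PublicKeyType constant declared earlier in this file yields the familiar ASCII block. A sketch assuming the upstream github.com/ProtonMail/go-crypto modules are importable:

package main

import (
    "bytes"
    "fmt"

    "github.com/ProtonMail/go-crypto/openpgp"
    "github.com/ProtonMail/go-crypto/openpgp/armor"
    "github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
    // NewEntity is the constructor shown earlier in this diff.
    e, err := openpgp.NewEntity("Alice", "", "alice@example.com", &packet.Config{})
    if err != nil {
        panic(err)
    }
    var buf bytes.Buffer
    w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
    if err != nil {
        panic(err)
    }
    if err := e.Serialize(w); err != nil {
        panic(err)
    }
    w.Close() // flush the armor trailer
    fmt.Println(buf.String()) // -----BEGIN PGP PUBLIC KEY BLOCK----- ...
}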
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - Version: signer.PrivateKey.Version, - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} - -// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the -// specified reason code and text (RFC4880 section-5.2.3.23). -// If config is nil, sensible defaults will be used. -func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - reasonCode := uint8(reason) - revSig := &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: config.Now(), - SigType: packet.SigTypeKeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - RevocationReason: &reasonCode, - RevocationReasonText: reasonText, - IssuerKeyId: &e.PrimaryKey.KeyId, - } - - if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { - return err - } - e.Revocations = append(e.Revocations, revSig) - return nil -} - -// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for -// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). -// If config is nil, sensible defaults will be used. 
-func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { - if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { - return errors.InvalidArgumentError("given subkey is not associated with this key") - } - - reasonCode := uint8(reason) - revSig := &packet.Signature{ - Version: e.PrimaryKey.Version, - CreationTime: config.Now(), - SigType: packet.SigTypeSubkeyRevocation, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - RevocationReason: &reasonCode, - RevocationReasonText: reasonText, - IssuerKeyId: &e.PrimaryKey.KeyId, - } - - if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { - return err - } - - sk.Sig = revSig - return nil -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go deleted file mode 100644 index 21d17c2f8be..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go +++ /dev/null @@ -1,336 +0,0 @@ -package openpgp - -const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9a
db9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" -const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c6
3552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" -const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" -const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" - -const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Charset: UTF-8 - -mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY -ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG -zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 -QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ -QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo -9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu -Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ -dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R -JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL -ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew -RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW -/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu -yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv -2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR -bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL -C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP -WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y -MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA -EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ -MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N -1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm -+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N -lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW -CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF -4artDmrG -=7FfJ ------END PGP PUBLIC KEY BLOCK-----` - -const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - 
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY -ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG -zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 -QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ -QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo -9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu -Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ -dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R -JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL -ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew -RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW -/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu -yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ -UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe -iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK -FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 -R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh -+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA -EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO -52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb -u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl -w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep -54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ -YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL -bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E -i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB -DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 -8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY -s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 -U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL -6LCg2mg= -=Dhm4 ------END PGP PUBLIC KEY BLOCK-----` - -const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v1 - -mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo -7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom -lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 -E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC -CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw -6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH -7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv -X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 -GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl -y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw -R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW -CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ -LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO -aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx -yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl -BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr -Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK -CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp -C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ -SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ -MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= -=vtbN ------END PGP PUBLIC KEY BLOCK-----` - 
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e -DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ -uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW -ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx -nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ -x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg -PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy -9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ -1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 -depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl -aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 -DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa -XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU -8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 -b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD -BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG -0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N -s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb -tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 -BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE -/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 -kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z -VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa -PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ -snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi -bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 -K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X -8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ -TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb -OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l -QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V -yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U -heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB -7qTZOahrETw= -=IKnw ------END PGP PUBLIC KEY BLOCK-----` - -const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR -s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL -EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 -I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ -lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe -AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 -2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 -69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 -Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b -wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW -FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR -AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM -vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx -22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj -7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY -AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 -tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX -JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl 
-mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 -8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC -0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b -4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl -Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY -=3fWu ------END PGP PUBLIC KEY BLOCK-----` - -const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L -plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz -r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 -I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ -sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe -AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ -3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm -k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s -GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G -HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT -CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR -AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN -MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j -UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS -YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs -nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ -U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es -HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK -lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg -AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV -UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM -0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX -12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa -3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi -wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 -Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G -1dh1WoUCyREZsJQg2YoIpWIcvw+a -=bNRo ------END PGP PUBLIC KEY BLOCK----- -` - -const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1 - -lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 -GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 -I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB -/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ -CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl -rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ -6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD -ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX -M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED -Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f -yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk -UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH -e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl -AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r -oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo -UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS -YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ -kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC 
-WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW -n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 -/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 -R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE -VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix -7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== -=F/T0 ------END PGP PRIVATE KEY BLOCK-----` - -const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii -B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS -PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 -lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA -aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD -Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v -Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S -rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH -AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA -iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ -Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ -AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou -b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== -=KLN8 ------END PGP PRIVATE KEY BLOCK-----` - -const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd -oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk -/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ -gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r -njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU -iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK -Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m -kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR -MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI -zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A -rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP -CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr -MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c -bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd -hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ -3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS -ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL -Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 -Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz -YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ -aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r -sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV -JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 -EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt -Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw -vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ -iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg -UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y -HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL -Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy -bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP -Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic 
-BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 -v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV -oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c -wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ -aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R -fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa -4g== -=XZm8 ------END PGP PRIVATE KEY BLOCK-----` - -// https://tests.sequoia-pgp.org/#Certificate_expiration -// P _ U p -const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- - -xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv -/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz -/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ -5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 -X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv -9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 -qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb -SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb -vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w -bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE -FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL -CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl -1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo -aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy -OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 -/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA -g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla -d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP -2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l -nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX -nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX -SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 -rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA -0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ -wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa -LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV -8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz -j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy -AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 -1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ -QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp -f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn -3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK -2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA -ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi -f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj -5KjhX2PVNEJd3XZRzaXZE2aAMQ== -=522n ------END PGP PUBLIC KEY BLOCK-----` diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go deleted file mode 100644 index 7350974effc..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019 ProtonTech AG - -package packet - -import "math/bits" - -// AEADConfig collects a number of AEAD parameters along with sensible defaults. 
-// A nil AEADConfig is valid and results in all default values.
-type AEADConfig struct {
- // The AEAD mode of operation.
- DefaultMode AEADMode
- // Amount of octets in each chunk of data
- ChunkSize uint64
-}
-
-// Mode returns the AEAD mode of operation.
-func (conf *AEADConfig) Mode() AEADMode {
- if conf == nil || conf.DefaultMode == 0 {
- return AEADModeEAX
- }
- mode := conf.DefaultMode
- if mode != AEADModeEAX && mode != AEADModeOCB &&
- mode != AEADModeExperimentalGCM {
- panic("AEAD mode unsupported")
- }
- return mode
-}
-
-// ChunkSizeByte returns the byte indicating the chunk size. The effective
-// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
-func (conf *AEADConfig) ChunkSizeByte() byte {
- if conf == nil || conf.ChunkSize == 0 {
- return 12 // 1 << (12 + 6) == 262144 bytes
- }
-
- chunkSize := conf.ChunkSize
- exponent := bits.Len64(chunkSize) - 1
- switch {
- case exponent < 6:
- exponent = 6
- case exponent > 27:
- exponent = 27
- }
-
- return byte(exponent - 6)
-}
-
-// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the
-// maximum returned value is 1 << 30.
-func decodeAEADChunkSize(c byte) int {
- size := uint64(1 << (c + 6))
- if size != uint64(int(size)) {
- return 1 << 30
- }
- return int(size)
-}
diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
deleted file mode 100644
index 85c53f08aa5..00000000000
--- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/rand"
- "encoding/binary"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16).
-type AEADEncrypted struct {
- cipher CipherFunction
- mode AEADMode
- chunkSizeByte byte
- Contents io.Reader // Encrypted chunks and tags
- initialNonce []byte // Referred to as IV in RFC4880-bis
-}
-
-// Only currently defined version
-const aeadEncryptedVersion = 1
-
-// An AEAD opener/sealer, its configuration, and data for en/decryption.
-type aeadCrypter struct {
- aead cipher.AEAD
- chunkSize int
- initialNonce []byte
- associatedData []byte // Chunk-independent associated data
- chunkIndex []byte // Chunk counter
- bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
- buffer bytes.Buffer // Buffered bytes across chunks
-}
-
-// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
-// to the AEAD block size, and buffers the extra encrypted bytes for next write.
-type aeadEncrypter struct {
- aeadCrypter // Embedded plaintext sealer
- writer io.WriteCloser // 'writer' is a partialLengthWriter
-}
-
-// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
-// necessary, similar to aeadEncrypter.
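Before the decrypter type below, a note for reviewers on the chunk-size encoding removed here: ChunkSizeByte and decodeAEADChunkSize above round-trip the chunk size through a single byte using the formula quoted in their comments, 1 << (c + 6). A minimal standalone sketch of that arithmetic (nothing beyond the quoted formula is assumed):

```go
package main

import "fmt"

// effectiveChunkSize mirrors the rule quoted in the comments above:
// a stored byte c encodes a chunk size of 1 << (c + 6) octets.
func effectiveChunkSize(c byte) uint64 {
	return uint64(1) << (c + 6)
}

func main() {
	// c = 0 is the smallest chunk (64 bytes); c = 12 is the default
	// returned by ChunkSizeByte (262144 bytes).
	for _, c := range []byte{0, 6, 12} {
		fmt.Printf("chunk size byte %d -> %d bytes\n", c, effectiveChunkSize(c))
	}
}
```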
-type aeadDecrypter struct { - aeadCrypter // Embedded ciphertext opener - reader io.Reader // 'reader' is a partialLengthReader - peekedBytes []byte // Used to detect last chunk - eof bool -} - -func (ae *AEADEncrypted) parse(buf io.Reader) error { - headerData := make([]byte, 4) - if n, err := io.ReadFull(buf, headerData); n < 4 { - return errors.AEADError("could not read aead header:" + err.Error()) - } - // Read initial nonce - mode := AEADMode(headerData[2]) - nonceLen := mode.NonceLength() - if nonceLen == 0 { - return errors.AEADError("unknown mode") - } - initialNonce := make([]byte, nonceLen) - if n, err := io.ReadFull(buf, initialNonce); n < nonceLen { - return errors.AEADError("could not read aead nonce:" + err.Error()) - } - ae.Contents = buf - ae.initialNonce = initialNonce - c := headerData[1] - if _, ok := algorithm.CipherById[c]; !ok { - return errors.UnsupportedError("unknown cipher: " + string(c)) - } - ae.cipher = CipherFunction(c) - ae.mode = mode - ae.chunkSizeByte = byte(headerData[3]) - return nil -} - -// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or -// an error. -func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) { - return ae.decrypt(key) -} - -// decrypt prepares an aeadCrypter and returns a ReadCloser from which -// decrypted bytes can be read (see aeadDecrypter.Read()). -func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) { - blockCipher := ae.cipher.new(key) - aead := ae.mode.new(blockCipher) - // Carry the first tagLen bytes - tagLen := ae.mode.TagLength() - peekedBytes := make([]byte, tagLen) - n, err := io.ReadFull(ae.Contents, peekedBytes) - if n < tagLen || (err != nil && err != io.EOF) { - return nil, errors.AEADError("Not enough data to decrypt:" + err.Error()) - } - chunkSize := decodeAEADChunkSize(ae.chunkSizeByte) - return &aeadDecrypter{ - aeadCrypter: aeadCrypter{ - aead: aead, - chunkSize: chunkSize, - initialNonce: ae.initialNonce, - associatedData: ae.associatedData(), - chunkIndex: make([]byte, 8), - }, - reader: ae.Contents, - peekedBytes: peekedBytes}, nil -} - -// Read decrypts bytes and reads them into dst. It decrypts when necessary and -// buffers extra decrypted bytes. It returns the number of bytes copied into dst -// and an error. -func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) { - // Return buffered plaintext bytes from previous calls - if ar.buffer.Len() > 0 { - return ar.buffer.Read(dst) - } - - // Return EOF if we've previously validated the final tag - if ar.eof { - return 0, io.EOF - } - - // Read a chunk - tagLen := ar.aead.Overhead() - cipherChunkBuf := new(bytes.Buffer) - _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize + tagLen)) - cipherChunk := cipherChunkBuf.Bytes() - if errRead != nil && errRead != io.EOF { - return 0, errRead - } - decrypted, errChunk := ar.openChunk(cipherChunk) - if errChunk != nil { - return 0, errChunk - } - - // Return decrypted bytes, buffering if necessary - if len(dst) < len(decrypted) { - n = copy(dst, decrypted[:len(dst)]) - ar.buffer.Write(decrypted[len(dst):]) - } else { - n = copy(dst, decrypted) - } - - // Check final authentication tag - if errRead == io.EOF { - errChunk := ar.validateFinalTag(ar.peekedBytes) - if errChunk != nil { - return n, errChunk - } - ar.eof = true // Mark EOF for when we've returned all buffered data - } - return -} - -// Close is noOp. The final authentication tag of the stream was already -// checked in the last Read call. 
In the future, this function could be used to -// wipe the reader and peeked, decrypted bytes, if necessary. -func (ar *aeadDecrypter) Close() (err error) { - return nil -} - -// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer. -// This writer encrypts and writes bytes (see aeadEncrypter.Write()). -func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) { - writeCloser := noOpCloser{w} - writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted) - if err != nil { - return nil, err - } - - // Data for en/decryption: tag, version, cipher, aead mode, chunk size - aeadConf := config.AEAD() - prefix := []byte{ - 0xD4, - aeadEncryptedVersion, - byte(config.Cipher()), - byte(aeadConf.Mode()), - aeadConf.ChunkSizeByte(), - } - n, err := writer.Write(prefix[1:]) - if err != nil || n < 4 { - return nil, errors.AEADError("could not write AEAD headers") - } - // Sample nonce - nonceLen := aeadConf.Mode().NonceLength() - nonce := make([]byte, nonceLen) - n, err = rand.Read(nonce) - if err != nil { - panic("Could not sample random nonce") - } - _, err = writer.Write(nonce) - if err != nil { - return nil, err - } - blockCipher := CipherFunction(config.Cipher()).new(key) - alg := AEADMode(aeadConf.Mode()).new(blockCipher) - - chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte()) - return &aeadEncrypter{ - aeadCrypter: aeadCrypter{ - aead: alg, - chunkSize: chunkSize, - associatedData: prefix, - chunkIndex: make([]byte, 8), - initialNonce: nonce, - }, - writer: writer}, nil -} - -// Write encrypts and writes bytes. It encrypts when necessary and buffers extra -// plaintext bytes for next call. When the stream is finished, Close() MUST be -// called to append the final tag. -func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { - // Append plaintextBytes to existing buffered bytes - n, err = aw.buffer.Write(plaintextBytes) - if err != nil { - return n, err - } - // Encrypt and write chunks - for aw.buffer.Len() >= aw.chunkSize { - plainChunk := aw.buffer.Next(aw.chunkSize) - encryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return n, err - } - _, err = aw.writer.Write(encryptedChunk) - if err != nil { - return n, err - } - } - return -} - -// Close encrypts and writes the remaining buffered plaintext if any, appends -// the final authentication tag, and closes the embedded writer. This function -// MUST be called at the end of a stream. -func (aw *aeadEncrypter) Close() (err error) { - // Encrypt and write a chunk if there's buffered data left, or if we haven't - // written any chunks yet. - if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { - plainChunk := aw.buffer.Bytes() - lastEncryptedChunk, err := aw.sealChunk(plainChunk) - if err != nil { - return err - } - _, err = aw.writer.Write(lastEncryptedChunk) - if err != nil { - return err - } - } - // Compute final tag (associated data: packet tag, version, cipher, aead, - // chunk size, index, total number of encrypted octets). - adata := append(aw.associatedData[:], aw.chunkIndex[:]...) - adata = append(adata, make([]byte, 8)...) - binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed)) - nonce := aw.computeNextNonce() - finalTag := aw.aead.Seal(nil, nonce, nil, adata) - _, err = aw.writer.Write(finalTag) - if err != nil { - return err - } - return aw.writer.Close() -} - -// sealChunk Encrypts and authenticates the given chunk. 
-func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
- if len(data) > aw.chunkSize {
- return nil, errors.AEADError("chunk exceeds maximum length")
- }
- if aw.associatedData == nil {
- return nil, errors.AEADError("can't seal without headers")
- }
- adata := append(aw.associatedData, aw.chunkIndex...)
- nonce := aw.computeNextNonce()
- encrypted := aw.aead.Seal(nil, nonce, data, adata)
- aw.bytesProcessed += len(data)
- if err := aw.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return encrypted, nil
-}
-
-// openChunk decrypts and checks integrity of an encrypted chunk, returning
-// the underlying plaintext and an error. It accesses peeked bytes from the next
-// chunk, to identify the last chunk and decrypt/validate accordingly.
-func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
- tagLen := ar.aead.Overhead()
- // Restore carried bytes from last call
- chunkExtra := append(ar.peekedBytes, data...)
- // 'chunk' contains encrypted bytes, followed by an authentication tag.
- chunk := chunkExtra[:len(chunkExtra)-tagLen]
- ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
- adata := append(ar.associatedData, ar.chunkIndex...)
- nonce := ar.computeNextNonce()
- plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
- if err != nil {
- return nil, err
- }
- ar.bytesProcessed += len(plainChunk)
- if err = ar.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return plainChunk, nil
-}
-
-// validateFinalTag checks the final (summary) tag. It takes the total number of
-// decrypted bytes into account in the associated data. It returns an error, or
-// nil if the tag is valid.
-func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
- // Associated: tag, version, cipher, aead, chunk size, index, and octets
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
- adata := append(ar.associatedData, ar.chunkIndex...)
- adata = append(adata, amountBytes...)
- nonce := ar.computeNextNonce()
- _, err := ar.aead.Open(nil, nonce, tag, adata)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Associated data for chunks: tag, version, cipher, mode, chunk size byte
-func (ae *AEADEncrypted) associatedData() []byte {
- return []byte{
- 0xD4,
- aeadEncryptedVersion,
- byte(ae.cipher),
- byte(ae.mode),
- ae.chunkSizeByte}
-}
-
-// computeNextNonce takes the incremental index and computes an eXclusive OR with
-// the least significant 8 bytes of the receiver's initial nonce (see sec.
-// 5.16.1 and 5.16.2). It returns the resulting nonce.
-func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
- nonce = make([]byte, len(wo.initialNonce))
- copy(nonce, wo.initialNonce)
- offset := len(wo.initialNonce) - 8
- for i := 0; i < 8; i++ {
- nonce[i+offset] ^= wo.chunkIndex[i]
- }
- return
-}
-
-// incrementIndex performs an integer increment by 1 of the integer represented by the
-// slice, modifying it accordingly.
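computeNextNonce above and incrementIndex, whose body follows, implement the per-chunk nonce schedule: a big-endian counter increment plus an XOR into the tail of the initial nonce. A self-contained sketch of the same arithmetic; the helper name increment and the all-zero 16-byte nonce are illustrative, not from the library:

```go
package main

import "fmt"

// increment performs the same big-endian +1 on a counter slice as
// incrementIndex below does for the 8-byte chunk index.
func increment(index []byte) {
	for i := len(index) - 1; i >= 0; i-- {
		if index[i] < 255 {
			index[i]++
			return
		}
		index[i] = 0
	}
}

func main() {
	index := []byte{0, 0, 0, 0, 0, 0, 0, 255}
	increment(index) // carry propagates: ... 00 ff -> ... 01 00
	fmt.Printf("index: % x\n", index)

	// Per-chunk nonce, as in computeNextNonce above: XOR the counter
	// into the last 8 bytes of the initial nonce (all-zero here only
	// for illustration).
	nonce := make([]byte, 16)
	offset := len(nonce) - 8
	for i := 0; i < 8; i++ {
		nonce[i+offset] ^= index[i]
	}
	fmt.Printf("nonce: % x\n", nonce)
}
```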
-func (wo *aeadCrypter) incrementIndex() error {
- index := wo.chunkIndex
- if len(index) == 0 {
- return errors.AEADError("Index has length 0")
- }
- for i := len(index) - 1; i >= 0; i-- {
- if index[i] < 255 {
- index[i]++
- return nil
- }
- index[i] = 0
- }
- return errors.AEADError("cannot further increment index")
-}
diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
deleted file mode 100644
index c55cecd3577..00000000000
--- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "compress/bzip2"
- "compress/flate"
- "compress/zlib"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
- "strconv"
-)
-
-// Compressed represents a compressed OpenPGP packet. The decompressed contents
-// will contain more OpenPGP packets. See RFC 4880, section 5.6.
-type Compressed struct {
- Body io.Reader
-}
-
-const (
- NoCompression = flate.NoCompression
- BestSpeed = flate.BestSpeed
- BestCompression = flate.BestCompression
- DefaultCompression = flate.DefaultCompression
-)
-
-// CompressionConfig contains compressor configuration settings.
-type CompressionConfig struct {
- // Level is the compression level to use. It must be set to
- // between -1 and 9, with -1 causing the compressor to use the
- // default compression level, 0 causing the compressor to use
- // no compression and 1 to 9 representing increasing (better,
- // slower) compression levels. If Level is less than -1 or
- // more than 9, a non-nil error will be returned during
- // encryption. See the constants above for convenient common
- // settings for Level.
- Level int
-}
-
-func (c *Compressed) parse(r io.Reader) error {
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case 1:
- c.Body = flate.NewReader(r)
- case 2:
- c.Body, err = zlib.NewReader(r)
- case 3:
- c.Body = bzip2.NewReader(r)
- default:
- err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
- }
-
- return err
-}
-
-// compressedWriteCloser represents the serialized compression stream
-// header and the compressor. Its Close() method ensures that both the
-// compressor and serialized stream header are closed. Its Write()
-// method writes to the compressor.
-type compressedWriteCloser struct {
- sh io.Closer // Stream Header
- c io.WriteCloser // Compressor
-}
-
-func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
- return cwc.c.Write(p)
-}
-
-func (cwc compressedWriteCloser) Close() (err error) {
- err = cwc.c.Close()
- if err != nil {
- return err
- }
-
- return cwc.sh.Close()
-}
-
-// SerializeCompressed serializes a compressed data packet to w and
-// returns a WriteCloser to which the literal data packets themselves
-// can be written and which MUST be closed on completion. If cc is
-// nil, sensible defaults will be used to configure the compression
-// algorithm.
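SerializeCompressed, whose body follows, is the public entry point of this file. A hedged usage sketch against the packet API as vendored here; the nopCloser adapter is our own scaffolding, not part of the library:

```go
package main

import (
	"bytes"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser parameter of
// SerializeCompressed; it is our helper, not part of the library.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var out bytes.Buffer
	// Open a ZLIB-compressed packet stream; a -1 level selects the
	// compressor's default, matching DefaultCompression above.
	w, err := packet.SerializeCompressed(nopCloser{&out},
		packet.CompressionZLIB, &packet.CompressionConfig{Level: -1})
	if err != nil {
		panic(err)
	}
	// Literal data packets would be written here; per the doc comment,
	// the returned WriteCloser MUST be closed on completion.
	if _, err := w.Write([]byte("inner packet bytes")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
}
```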
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
- compressed, err := serializeStreamHeader(w, packetTypeCompressed)
- if err != nil {
- return
- }
-
- _, err = compressed.Write([]byte{uint8(algo)})
- if err != nil {
- return
- }
-
- level := DefaultCompression
- if cc != nil {
- level = cc.Level
- }
-
- var compressor io.WriteCloser
- switch algo {
- case CompressionZIP:
- compressor, err = flate.NewWriter(compressed, level)
- case CompressionZLIB:
- compressor, err = zlib.NewWriterLevel(compressed, level)
- default:
- s := strconv.Itoa(int(algo))
- err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
- }
- if err != nil {
- return
- }
-
- literaldata = compressedWriteCloser{compressed, compressor}
-
- return
-}
diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
deleted file mode 100644
index 5652dd81485..00000000000
--- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rand"
- "io"
- "math/big"
- "time"
-)
-
-// Config collects a number of parameters along with sensible defaults.
-// A nil *Config is valid and results in all default values.
-type Config struct {
- // Rand provides the source of entropy.
- // If nil, the crypto/rand Reader is used.
- Rand io.Reader
- // DefaultHash is the default hash function to be used.
- // If zero, SHA-256 is used.
- DefaultHash crypto.Hash
- // DefaultCipher is the cipher to be used.
- // If zero, AES-128 is used.
- DefaultCipher CipherFunction
- // Time returns the current time as the number of seconds since the
- // epoch. If Time is nil, time.Now is used.
- Time func() time.Time
- // DefaultCompressionAlgo is the compression algorithm to be
- // applied to the plaintext before encryption. If zero, no
- // compression is done.
- DefaultCompressionAlgo CompressionAlgo
- // CompressionConfig configures the compression settings.
- CompressionConfig *CompressionConfig
- // S2KCount is only used for symmetric encryption. It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 1024 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 65536 is used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
- // RSABits is the number of bits in new RSA keys made with NewEntity.
- // If zero, then 2048 bit keys are created.
- RSABits int
- // The public key algorithm to use - will always create a signing primary
- // key and encryption subkey.
- Algorithm PublicKeyAlgorithm
- // Some known primes that are optionally prepopulated by the caller
- RSAPrimes []*big.Int
- // AEADConfig configures the use of the new AEAD Encrypted Data Packet,
- // defined in the draft of the next version of the OpenPGP specification.
- // If a non-nil AEADConfig is passed, usage of this packet is enabled. By
- // default, it is disabled.
See the documentation of AEADConfig for more - // configuration options related to AEAD. - // **Note: using this option may break compatibility with other OpenPGP - // implementations, as well as future versions of this library.** - AEADConfig *AEADConfig - // V5Keys configures version 5 key generation. If false, this package still - // supports version 5 keys, but produces version 4 keys. - V5Keys bool - // "The validity period of the key. This is the number of seconds after - // the key creation time that the key expires. If this is not present - // or has a value of zero, the key never expires. This is found only on - // a self-signature."" - // https://tools.ietf.org/html/rfc4880#section-5.2.3.6 - KeyLifetimeSecs uint32 - // "The validity period of the signature. This is the number of seconds - // after the signature creation time that the signature expires. If - // this is not present or has a value of zero, it never expires." - // https://tools.ietf.org/html/rfc4880#section-5.2.3.10 - SigLifetimeSecs uint32 - // SigningKeyId is used to specify the signing key to use (by Key ID). - // By default, the signing key is selected automatically, preferring - // signing subkeys if available. - SigningKeyId uint64 -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -// KeyLifetime returns the validity period of the key. -func (c *Config) KeyLifetime() uint32 { - if c == nil { - return 0 - } - return c.KeyLifetimeSecs -} - -// SigLifetime returns the validity period of the signature. -func (c *Config) SigLifetime() uint32 { - if c == nil { - return 0 - } - return c.SigLifetimeSecs -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} - -func (c *Config) RSAModulusBits() int { - if c == nil || c.RSABits == 0 { - return 2048 - } - return c.RSABits -} - -func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { - if c == nil || c.Algorithm == 0 { - return PubKeyAlgoRSA - } - return c.Algorithm -} - -func (c *Config) AEAD() *AEADConfig { - if c == nil { - return nil - } - return c.AEADConfig -} - -func (c *Config) SigningKey() uint64 { - if c == nil { - return 0 - } - return c.SigningKeyId -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 801aec92b49..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,282 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
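The Config getters above all share one pattern: every accessor tolerates a nil *Config receiver and falls back to a documented default, which is why a nil *Config is valid throughout this package. A minimal sketch of that pattern outside the library; the options type and its field are illustrative names:

```go
package main

import "fmt"

// options mimics the nil-receiver pattern of *Config above: every
// getter tolerates a nil receiver and falls back to a default.
type options struct{ rsaBits int }

func (o *options) RSAModulusBits() int {
	if o == nil || o.rsaBits == 0 {
		return 2048 // the documented default
	}
	return o.rsaBits
}

func main() {
	var o *options // nil is valid and yields all defaults
	fmt.Println(o.RSAModulusBits())                         // 2048
	fmt.Println((&options{rsaBits: 4096}).RSAModulusBits()) // 4096
}
```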
- -package packet - -import ( - "crypto" - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. -type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 encoding.Field -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - - e.encryptedMPI2 = new(encoding.MPI) - if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { - return - } - case PubKeyAlgoECDH: - e.encryptedMPI1 = new(encoding.MPI) - if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { - return - } - - e.encryptedMPI2 = new(encoding.OID) - if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key. The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - if e.KeyId != 0 && e.KeyId != priv.KeyId { - return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) - } - if e.Algo != priv.PubKeyAlgo { - return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. 
- switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - // Supports both *rsa.PrivateKey and crypto.Decrypter - k := priv.PrivateKey.(crypto.Decrypter) - b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - case PubKeyAlgoECDH: - vsG := e.encryptedMPI1.Bytes() - m := e.encryptedMPI2.Bytes() - oid := priv.PublicKey.oid.EncodedBytes() - b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) - default: - err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. -func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = int(e.encryptedMPI1.EncodedLength()) - case PubKeyAlgoElGamal: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) - case PubKeyAlgoECDH: - mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - if err != nil { - return err - } - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - _, err := w.Write(e.encryptedMPI1.EncodedBytes()) - return err - case PubKeyAlgoElGamal: - if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { - return err - } - _, err := w.Write(e.encryptedMPI2.EncodedBytes()) - return err - case PubKeyAlgoECDH: - if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { - return err - } - _, err := w.Write(e.encryptedMPI2.EncodedBytes()) - return err - default: - panic("internal error") - } -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used. 
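As context for SerializeEncryptedKey, whose body follows: both it and Decrypt above work with a session-key block laid out as one cipher-function byte, the key material, and a two-byte big-endian additive checksum (checksumKeyMaterial above). A standalone sketch of that layout, with toy key bytes chosen purely for illustration:

```go
package main

import "fmt"

// checksum reproduces checksumKeyMaterial above: a 16-bit additive
// checksum of the key octets, wrapping modulo 65536.
func checksum(key []byte) uint16 {
	var sum uint16
	for _, v := range key {
		sum += uint16(v)
	}
	return sum
}

func main() {
	key := []byte{0x01, 0x02, 0x03} // toy session key, for illustration only
	// Layout produced by SerializeEncryptedKey below: one cipher-function
	// byte, the key material, then the big-endian checksum.
	block := make([]byte, 1+len(key)+2)
	block[0] = 9 // AES-256 in the OpenPGP cipher registry
	copy(block[1:], key)
	sum := checksum(key)
	block[1+len(key)] = byte(sum >> 8)
	block[1+len(key)+1] = byte(sum)
	fmt.Printf("% x\n", block) // 09 01 02 03 00 06
}
```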
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoECDH: - return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - cipherMPI := encoding.NewMPI(cipherText) - packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - _, err = w.Write(cipherMPI.EncodedBytes()) - return err -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { - return err - } - _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) - return err -} - -func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { - vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) - if err != nil { - return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) - } - - g := encoding.NewMPI(vsG) - m := encoding.NewOID(c) - - packetLen := 10 /* header length */ - packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - - _, err = w.Write(header[:]) - if err != nil { - return err - } - if _, err = w.Write(g.EncodedBytes()); err != nil { - return err - } - _, err = w.Write(m.EncodedBytes()) - return err -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go deleted file mode 100644 index 4be987609be..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - Format uint8 - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. -func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.Format = buf[0] - l.IsBinary = l.Format == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go deleted file mode 100644 index 4f26d0a00b7..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. 
Different parts of OpenPGP vary on -// this point. -func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. On successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 41c35de219d..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go deleted file mode 100644 index e3879199e6b..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. 
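OpaqueReader, declared next, yields packets raw, without parsing their contents. A hedged usage sketch against the vendored API; the empty input stream is only there to show the loop shape:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/ProtonMail/go-crypto/openpgp/packet"
)

func main() {
	// Any raw OpenPGP packet stream works here; an empty reader simply
	// hits EOF on the first Next call.
	or := packet.NewOpaqueReader(bytes.NewReader(nil))
	for {
		op, err := or.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("tag %d, %d content bytes\n", op.Tag, len(op.Contents))
	}
}
```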
-type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. -func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go deleted file mode 100644 index fe0da9d3685..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,522 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" - -import ( - "bytes" - "crypto/cipher" - "crypto/rsa" - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. 
-func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. -func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - buf bytes.Buffer - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - bufLen := w.buf.Len() - if bufLen > 512 { - for power := uint(30); ; power-- { - l := 1 << power - if bufLen >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(w.buf.Next(l)) - if err != nil { - return - } - if m != l { - return 0, io.ErrShortWrite - } - break - } - } - } - return w.buf.Write(p) -} - -func (w *partialLengthWriter) Close() (err error) { - len := w.buf.Len() - err = serializeLength(w.w, len) - if err != nil { - return err - } - _, err = w.buf.WriteTo(w.w) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - err = serializeType(w, ptype) - if err != nil { - return - } - return serializeLength(w, length) -} - -// serializeType writes an OpenPGP packet type to w. See RFC 4880, section -// 4.2. -func serializeType(w io.Writer, ptype packetType) (err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - return -} - -// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section -// 4.2.2. -func serializeLength(w io.Writer, length int) (err error) { - var buf [5]byte - var n int - - if length < 192 { - buf[0] = byte(length) - n = 1 - } else if length < 8384 { - length -= 192 - buf[0] = 192 + byte(length>>8) - buf[1] = byte(length) - n = 2 - } else { - buf[0] = 255 - buf[1] = byte(length >> 24) - buf[2] = byte(length >> 16) - buf[3] = byte(length >> 8) - buf[4] = byte(length) - n = 5 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - err = serializeType(w, ptype) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. 
See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 - packetTypeAEADEncrypted packetType = 20 -) - -// EncryptedDataPacket holds encrypted data. It is currently implemented by -// SymmetricallyEncrypted and AEADEncrypted. -type EncryptedDataPacket interface { - Decrypt(CipherFunction, []byte) (io.ReadCloser, error) -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - p = new(Signature) - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - isSubkey := tag == packetTypePublicSubkey - p = &PublicKey{IsSubkey: isSubkey} - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - case packetTypeAEADEncrypted: - p = new(AEADEncrypted) - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0x00 - SigTypeText = 0x01 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. 
- PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 - // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt - PubKeyAlgoEdDSA PublicKeyAlgorithm = 22 - - // Deprecated in RFC 4880, Section 13.5. Use key flags instead. - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. -func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction algorithm.CipherFunction - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - return algorithm.CipherFunction(cipher).KeySize() -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - return algorithm.CipherFunction(cipher).BlockSize() -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - return algorithm.CipherFunction(cipher).New(key) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. -type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) - -// AEADMode represents the different Authenticated Encryption with Associated -// Data specified for OpenPGP. -type AEADMode algorithm.AEADMode - -const ( - AEADModeEAX AEADMode = 1 - AEADModeOCB AEADMode = 2 - AEADModeExperimentalGCM AEADMode = 100 -) - -func (mode AEADMode) NonceLength() int { - return algorithm.AEADMode(mode).NonceLength() -} - -func (mode AEADMode) TagLength() int { - return algorithm.AEADMode(mode).TagLength() -} - -// new returns a fresh instance of the given mode. -func (mode AEADMode) new(block cipher.Block) cipher.AEAD { - return algorithm.AEADMode(mode).New(block) -} - -// ReasonForRevocation represents a revocation reason code as per RFC4880 -// section 5.2.3.23. 
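// Within a signature, the code travels as the first octet of the Reason
// for Revocation subpacket (type 29), followed by a human-readable string.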
-type ReasonForRevocation uint8 - -const ( - NoReason ReasonForRevocation = 0 - KeySuperseded ReasonForRevocation = 1 - KeyCompromised ReasonForRevocation = 2 - KeyRetired ReasonForRevocation = 3 -) diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go deleted file mode 100644 index 854f9c4bbbe..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,780 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "fmt" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "golang.org/x/crypto/curve25519" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "github.com/ProtonMail/go-crypto/openpgp/s2k" - "golang.org/x/crypto/ed25519" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or - // crypto.Signer/crypto.Decrypter (Decryptor RSA only). - PrivateKey interface{} - sha1Checksum bool - iv []byte - - // Type of encryption of the S2K packet - // Allowed values are 0 (Not encrypted), 254 (SHA1), or - // 255 (2-byte checksum) - s2kType S2KType - // Full parameters of the S2K packet - s2kParams *s2k.Params -} - -//S2KType s2k packet type -type S2KType uint8 - -const ( - // S2KNON unencrypt - S2KNON S2KType = 0 - // S2KSHA1 sha1 sum check - S2KSHA1 S2KType = 254 - // S2KCHECKSUM sum check - S2KCHECKSUM S2KType = 255 -) - -func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewEdDSAPrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pub := priv.Public().(ed25519.PublicKey) - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pub) - pk.PrivateKey = priv - return pk -} - -func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) - pk.PrivateKey = priv - return 
pk -} - -// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that -// implements RSA, ECDSA or EdDSA. -func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - // In general, the public Keys should be used as pointers. We still - // type-switch on the values, for backwards-compatibility. - switch pubkey := signer.Public().(type) { - case *rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) - case *ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) - case *ed25519.PublicKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, pubkey) - case ed25519.PublicKey: - pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. -func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { - pk := new(PrivateKey) - switch priv := decrypter.(type) { - case *rsa.PrivateKey: - pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) - case *elgamal.PrivateKey: - pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) - case *ecdh.PrivateKey: - pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) - default: - panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") - } - pk.PrivateKey = decrypter - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - v5 := pk.PublicKey.Version == 5 - - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.s2kType = S2KType(buf[0]) - var optCount [1]byte - if v5 { - if _, err = readFull(r, optCount[:]); err != nil { - return - } - } - - switch pk.s2kType { - case S2KNON: - pk.s2k = nil - pk.Encrypted = false - case S2KSHA1, S2KCHECKSUM: - if v5 && pk.s2kType == S2KCHECKSUM { - return errors.StructuralError("wrong s2k identifier for version 5") - } - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.s2kParams, err = s2k.ParseIntoParams(r) - if err != nil { - return - } - if pk.s2kParams.Dummy() { - return - } - pk.s2k, err = pk.s2kParams.Function() - if err != nil { - return - } - pk.Encrypted = true - if pk.s2kType == S2KSHA1 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - var privateKeyData []byte - if v5 { - var n [4]byte /* secret material four octet count */ - _, err = readFull(r, n[:]) - if err != nil { - return - } - count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) - if !pk.Encrypted { - count = count + 2 /* two octet checksum */ - } - privateKeyData = make([]byte, count) - _, err = readFull(r, privateKeyData) - if err != nil { - return - } - } else { - privateKeyData, err = ioutil.ReadAll(r) - if err != nil { - return - } - } - if !pk.Encrypted { - return 
pk.parsePrivateKey(privateKeyData) - } - - pk.encryptedData = privateKeyData - return -} - -// Dummy returns true if the private key is a dummy key. This is a GNU extension. -func (pk *PrivateKey) Dummy() bool { - return pk.s2kParams.Dummy() -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - contents := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(contents) - if err != nil { - return - } - if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { - return - } - - optional := bytes.NewBuffer(nil) - if pk.Encrypted || pk.Dummy() { - optional.Write([]byte{uint8(pk.cipher)}) - if err := pk.s2kParams.Serialize(optional); err != nil { - return err - } - if pk.Encrypted { - optional.Write(pk.iv) - } - } - if pk.Version == 5 { - contents.Write([]byte{uint8(optional.Len())}) - } - io.Copy(contents, optional) - - if !pk.Dummy() { - l := 0 - var priv []byte - if !pk.Encrypted { - buf := bytes.NewBuffer(nil) - err = pk.serializePrivateKey(buf) - if err != nil { - return err - } - l = buf.Len() - if pk.sha1Checksum { - h := sha1.New() - h.Write(buf.Bytes()) - buf.Write(h.Sum(nil)) - } else { - checksum := mod64kHash(buf.Bytes()) - buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) - } - priv = buf.Bytes() - } else { - priv, l = pk.encryptedData, len(pk.encryptedData) - } - - if pk.Version == 5 { - contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) - } - contents.Write(priv) - } - - ptype := packetTypePrivateKey - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, contents.Len()) - if err != nil { - return - } - _, err = io.Copy(w, contents) - if err != nil { - return - } - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { - return err - } - if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { - return err - } - if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { - return err - } - _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) - return err -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) - return err -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) - return err -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()) - return err -} - -func serializeEdDSAPrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { - keySize := ed25519.PrivateKeySize - ed25519.PublicKeySize - _, err := w.Write(encoding.NewMPI((*priv)[:keySize]).EncodedBytes()) - return err -} - -func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { - _, err := w.Write(encoding.NewMPI(priv.D).EncodedBytes()) - return err -} - -// Decrypt decrypts an encrypted private key using a passphrase. 
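// A typical unlock/re-lock flow (sketch; the passphrase variables are
// illustrative):
//
//	if pk.Encrypted {
//		if err := pk.Decrypt(oldPassphrase); err != nil {
//			return err // a wrong passphrase surfaces as a checksum failure
//		}
//	}
//	// ... use pk.PrivateKey ...
//	err := pk.Encrypt(newPassphrase) // re-lock: AES-256 with iterated S2K, see below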
-func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if pk.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - err := pk.parsePrivateKey(data) - if _, ok := err.(errors.KeyInvalidError); ok { - return errors.KeyInvalidError("invalid key parameters") - } - if err != nil { - return err - } - - // Mark key as unencrypted - pk.s2kType = S2KNON - pk.s2k = nil - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -// Encrypt encrypts an unencrypted private key using a passphrase. -func (pk *PrivateKey) Encrypt(passphrase []byte) error { - priv := bytes.NewBuffer(nil) - err := pk.serializePrivateKey(priv) - if err != nil { - return err - } - - //Default config of private key encryption - pk.cipher = CipherAES256 - s2kConfig := &s2k.Config{ - S2KMode: 3, //Iterated - S2KCount: 65536, - Hash: crypto.SHA256, - } - - pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) - if err != nil { - return err - } - privateKeyBytes := priv.Bytes() - key := make([]byte, pk.cipher.KeySize()) - - pk.sha1Checksum = true - pk.s2k, err = pk.s2kParams.Function() - if err != nil { - return err - } - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - pk.iv = make([]byte, pk.cipher.blockSize()) - _, err = rand.Read(pk.iv) - if err != nil { - return err - } - cfb := cipher.NewCFBEncrypter(block, pk.iv) - - if pk.sha1Checksum { - pk.s2kType = S2KSHA1 - h := sha1.New() - h.Write(privateKeyBytes) - sum := h.Sum(nil) - privateKeyBytes = append(privateKeyBytes, sum...) 
- } else { - pk.s2kType = S2KCHECKSUM - var sum uint16 - for _, b := range privateKeyBytes { - sum += uint16(b) - } - priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) - } - - pk.encryptedData = make([]byte, len(privateKeyBytes)) - cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) - pk.Encrypted = true - pk.PrivateKey = nil - return err -} - -func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(w, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(w, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(w, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(w, priv) - case *ed25519.PrivateKey: - err = serializeEdDSAPrivateKey(w, priv) - case *ecdh.PrivateKey: - err = serializeECDHPrivateKey(w, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - return -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - case PubKeyAlgoECDH: - return pk.parseECDHPrivateKey(data) - case PubKeyAlgoEdDSA: - return pk.parseEdDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - p := new(encoding.MPI) - if _, err := p.ReadFrom(buf); err != nil { - return err - } - - q := new(encoding.MPI) - if _, err := q.ReadFrom(buf); err != nil { - return err - } - - rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) - if err := rsaPriv.Validate(); err != nil { - return errors.KeyInvalidError(err.Error()) - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x := new(encoding.MPI) - if _, err := x.ReadFrom(buf); err != nil { - return err - } - - dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) - if err := validateDSAParameters(dsaPriv); err != nil { - return err - } - pk.PrivateKey = dsaPriv - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x := new(encoding.MPI) - if _, err := x.ReadFrom(buf); err != nil { - return err - } - - priv.X = new(big.Int).SetBytes(x.Bytes()) - if err := validateElGamalParameters(priv); err != nil { - return err - } - pk.PrivateKey = priv - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - ecdsaPriv := new(ecdsa.PrivateKey) - ecdsaPriv.PublicKey = *ecdsaPub - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, 
err := d.ReadFrom(buf); err != nil { - return err - } - - ecdsaPriv.D = new(big.Int).SetBytes(d.Bytes()) - if err := validateECDSAParameters(ecdsaPriv); err != nil { - return err - } - pk.PrivateKey = ecdsaPriv - - return nil -} - -func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { - ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) - ecdhPriv := new(ecdh.PrivateKey) - ecdhPriv.PublicKey = *ecdhPub - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - ecdhPriv.D = d.Bytes() - if err := validateECDHParameters(ecdhPriv); err != nil { - return err - } - pk.PrivateKey = ecdhPriv - - return nil -} - -func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { - eddsaPub := pk.PublicKey.PublicKey.(*ed25519.PublicKey) - eddsaPriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize) - - buf := bytes.NewBuffer(data) - d := new(encoding.MPI) - if _, err := d.ReadFrom(buf); err != nil { - return err - } - - priv := d.Bytes() - copy(eddsaPriv[32-len(priv):32], priv) - copy(eddsaPriv[32:], (*eddsaPub)[:]) - if err := validateEdDSAParameters(&eddsaPriv); err != nil { - return err - } - pk.PrivateKey = &eddsaPriv - - return nil -} - -func validateECDSAParameters(priv *ecdsa.PrivateKey) error { - return validateCommonECC(priv.Curve, priv.D.Bytes(), priv.X, priv.Y) -} - -func validateECDHParameters(priv *ecdh.PrivateKey) error { - if priv.CurveType != ecc.Curve25519 { - return validateCommonECC(priv.Curve, priv.D, priv.X, priv.Y) - } - // Handle Curve25519 - Q := priv.X.Bytes()[1:] - var d [32]byte - // Copy reversed d - l := len(priv.D) - for i := 0; i < l; i++ { - d[i] = priv.D[l-i-1] - } - var expectedQ [32]byte - curve25519.ScalarBaseMult(&expectedQ, &d) - if !bytes.Equal(Q, expectedQ[:]) { - return errors.KeyInvalidError("ECDH curve25519: invalid point") - } - return nil -} - -func validateCommonECC(curve elliptic.Curve, d []byte, X, Y *big.Int) error { - // the public point should not be at infinity (0,0) - zero := new(big.Int) - if X.Cmp(zero) == 0 && Y.Cmp(zero) == 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", curve.Params().Name)) - } - // re-derive the public point Q' = (X,Y) = dG - // to compare to declared Q in public key - expectedX, expectedY := curve.ScalarBaseMult(d) - if X.Cmp(expectedX) != 0 || Y.Cmp(expectedY) != 0 { - return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", curve.Params().Name)) - } - return nil -} - -func validateEdDSAParameters(priv *ed25519.PrivateKey) error { - // In EdDSA, the serialized public point is stored as part of private key (together with the seed), - // hence we can re-derive the key from the seed - seed := priv.Seed() - expectedPriv := ed25519.NewKeyFromSeed(seed) - if !bytes.Equal(*priv, expectedPriv) { - return errors.KeyInvalidError("eddsa: invalid point") - } - return nil -} - -func validateDSAParameters(priv *dsa.PrivateKey) error { - p := priv.P // group prime - q := priv.Q // subgroup order - g := priv.G // g has order q mod p - x := priv.X // secret - y := priv.Y // y == g**x mod p - one := big.NewInt(1) - // expect g, y >= 2 and g < p - if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { - return errors.KeyInvalidError("dsa: invalid group") - } - // expect p > q - if p.Cmp(q) <= 0 { - return errors.KeyInvalidError("dsa: invalid group prime") - } - // q should be large enough and divide p-1 - pSub1 := new(big.Int).Sub(p, one) - if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { - 
return errors.KeyInvalidError("dsa: invalid order") - } - // confirm that g has order q mod p - if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { - return errors.KeyInvalidError("dsa: invalid order") - } - // check y - if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { - return errors.KeyInvalidError("dsa: mismatching values") - } - - return nil -} - -func validateElGamalParameters(priv *elgamal.PrivateKey) error { - p := priv.P // group prime - g := priv.G // g has order p-1 mod p - x := priv.X // secret - y := priv.Y // y == g**x mod p - one := big.NewInt(1) - // Expect g, y >= 2 and g < p - if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { - return errors.KeyInvalidError("elgamal: invalid group") - } - if p.BitLen() < 1024 { - return errors.KeyInvalidError("elgamal: group order too small") - } - pSub1 := new(big.Int).Sub(p, one) - if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { - return errors.KeyInvalidError("elgamal: invalid group") - } - // Since p-1 is not prime, g might have a smaller order that divides p-1. - // We cannot confirm the exact order of g, but we make sure it is not too small. - gExpI := new(big.Int).Set(g) - i := 1 - threshold := 2 << 17 // we want order > threshold - for i < threshold { - i++ // we check every order to make sure key validation is not easily bypassed by guessing y' - gExpI.Mod(new(big.Int).Mul(gExpI, g), p) - if gExpI.Cmp(one) == 0 { - return errors.KeyInvalidError("elgamal: order too small") - } - } - // Check y - if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { - return errors.KeyInvalidError("elgamal: mismatching values") - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go deleted file mode 100644 index 029b8f1aab2..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go +++ /dev/null @@ -1,12 +0,0 @@ -package packet - -// Generated with `gpg --export-secret-keys "Test Key 2"` -const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" - -// Generated by `gpg --export-secret-keys` followed by a manual extraction of -// the ElGamal subkey from the packets. 
-const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" - -// pkcs1PrivKeyHex is a PKCS#1, RSA private key. -// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` -const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go deleted file mode 100644 index 28971c1ada2..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,825 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/ecdh" - "github.com/ProtonMail/go-crypto/openpgp/elgamal" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" - "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "golang.org/x/crypto/ed25519" -) - -type kdfHashFunction byte -type kdfAlgorithm byte - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
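// For v4 keys, Fingerprint holds the 20-byte SHA-1 of the serialized key
// material and KeyId is its low 64 bits; v5 keys carry a 32-byte SHA-256
// fingerprint with KeyId taken from its first 8 bytes (see
// setFingerprintAndKeyId below).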
-type PublicKey struct { - Version int - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey - Fingerprint []byte - KeyId uint64 - IsSubkey bool - - // RFC 4880 fields - n, e, p, q, g, y encoding.Field - - // RFC 6637 fields - // oid contains the OID byte sequence identifying the elliptic curve used - oid encoding.Field - - // kdf stores key derivation function parameters - // used for ECDH encryption. See RFC 6637, Section 9. - kdf encoding.Field -} - -// UpgradeToV5 updates the version of the key to v5, and updates all necessary -// fields. -func (pk *PublicKey) UpgradeToV5() { - pk.Version = 5 - pk.setFingerprintAndKeyId() -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeForHash(io.Writer) error - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: new(encoding.MPI).SetBig(pub.N), - e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerprintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: new(encoding.MPI).SetBig(pub.P), - q: new(encoding.MPI).SetBig(pub.Q), - g: new(encoding.MPI).SetBig(pub.G), - y: new(encoding.MPI).SetBig(pub.Y), - } - - pk.setFingerprintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
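// Each constructor follows the same pattern; a sketch using the RSA
// variant above (randSource stands in for a caller-supplied entropy
// source such as crypto/rand.Reader):
//
//	priv, err := rsa.GenerateKey(randSource, 2048)
//	if err != nil {
//		return err
//	}
//	pk := NewRSAPublicKey(time.Now(), &priv.PublicKey)
//	_ = pk.Fingerprint // already populated via setFingerprintAndKeyId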
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: new(encoding.MPI).SetBig(pub.P), - g: new(encoding.MPI).SetBig(pub.G), - y: new(encoding.MPI).SetBig(pub.Y), - } - - pk.setFingerprintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), - } - - curveInfo := ecc.FindByCurve(pub.Curve) - if curveInfo == nil { - panic("unknown elliptic curve") - } - pk.oid = curveInfo.Oid - pk.setFingerprintAndKeyId() - return pk -} - -func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { - var pk *PublicKey - var curveInfo *ecc.CurveInfo - var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) - if pub.CurveType == ecc.Curve25519 { - pk = &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDH, - PublicKey: pub, - p: encoding.NewMPI(pub.X.Bytes()), - kdf: kdf, - } - curveInfo = ecc.FindByName("Curve25519") - } else { - pk = &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDH, - PublicKey: pub, - p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), - kdf: kdf, - } - curveInfo = ecc.FindByCurve(pub.Curve) - } - if curveInfo == nil { - panic("unknown elliptic curve") - } - pk.oid = curveInfo.Oid - pk.setFingerprintAndKeyId() - return pk -} - -func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { - curveInfo := ecc.FindByName("Ed25519") - pk := &PublicKey{ - Version: 4, - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoEdDSA, - PublicKey: pub, - oid: curveInfo.Oid, - // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B - p: encoding.NewMPI(append([]byte{0x40}, *pub...)), - } - - pk.setFingerprintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 && buf[0] != 5 { - return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) - } - - pk.Version = int(buf[0]) - if pk.Version == 5 { - var n [4]byte - _, err = readFull(r, n[:]) - if err != nil { - return - } - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - err = pk.parseECDSA(r) - case PubKeyAlgoECDH: - err = pk.parseECDH(r) - case PubKeyAlgoEdDSA: - err = pk.parseEdDSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerprintAndKeyId() - return -} - -func (pk *PublicKey) setFingerprintAndKeyId() { - // RFC 4880, section 12.2 - if pk.Version == 5 { - fingerprint := sha256.New() - pk.SerializeForHash(fingerprint) - pk.Fingerprint = make([]byte, 32) - copy(pk.Fingerprint, fingerprint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) - } else { - fingerprint := sha1.New() - 
pk.SerializeForHash(fingerprint) - pk.Fingerprint = make([]byte, 20) - copy(pk.Fingerprint, fingerprint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) - } -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n = new(encoding.MPI) - if _, err = pk.n.ReadFrom(r); err != nil { - return - } - pk.e = new(encoding.MPI) - if _, err = pk.e.ReadFrom(r); err != nil { - return - } - - if len(pk.e.Bytes()) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.Bytes()), - E: 0, - } - for i := 0; i < len(pk.e.Bytes()); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.Bytes()[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.q = new(encoding.MPI) - if _, err = pk.q.ReadFrom(r); err != nil { - return - } - pk.g = new(encoding.MPI) - if _, err = pk.g.ReadFrom(r); err != nil { - return - } - pk.y = new(encoding.MPI) - if _, err = pk.y.ReadFrom(r); err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) - dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) - dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) - dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.g = new(encoding.MPI) - if _, err = pk.g.ReadFrom(r); err != nil { - return - } - pk.y = new(encoding.MPI) - if _, err = pk.y.ReadFrom(r); err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) - elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) - elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) - pk.PublicKey = elgamal - return -} - -// parseECDSA parses ECDSA public key material from the given Reader. See -// RFC 6637, Section 9. -func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - - var c elliptic.Curve - curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil || curveInfo.SigAlgorithm != ecc.ECDSA { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - c = curveInfo.Curve - x, y := elliptic.Unmarshal(c, pk.p.Bytes()) - if x == nil { - return errors.UnsupportedError("failed to parse EC point") - } - pk.PublicKey = &ecdsa.PublicKey{Curve: c, X: x, Y: y} - return -} - -// parseECDH parses ECDH public key material from the given Reader. See -// RFC 6637, Section 9. 
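// The trailing KDF field is at least three octets, validated below:
//
//	octet 0: reserved, must be 0x01
//	octet 1: hash algorithm id used by the KDF
//	octet 2: symmetric cipher id used for key wrapping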
-func (pk *PublicKey) parseECDH(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - pk.kdf = new(encoding.OID) - if _, err = pk.kdf.ReadFrom(r); err != nil { - return - } - - curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - - c := curveInfo.Curve - cType := curveInfo.CurveType - - var x, y *big.Int - if cType == ecc.Curve25519 { - x = new(big.Int) - x.SetBytes(pk.p.Bytes()) - } else { - x, y = elliptic.Unmarshal(c, pk.p.Bytes()) - } - if x == nil { - return errors.UnsupportedError("failed to parse EC point") - } - - if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { - return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { - return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) - } - kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] - if !ok { - return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) - } - kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] - if !ok { - return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) - } - - pk.PublicKey = &ecdh.PublicKey{ - CurveType: cType, - Curve: c, - X: x, - Y: y, - KDF: ecdh.KDF{ - Hash: kdfHash, - Cipher: kdfCipher, - }, - } - return -} - -func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { - pk.oid = new(encoding.OID) - if _, err = pk.oid.ReadFrom(r); err != nil { - return - } - curveInfo := ecc.FindByOid(pk.oid) - if curveInfo == nil || curveInfo.SigAlgorithm != ecc.EdDSA { - return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) - } - pk.p = new(encoding.MPI) - if _, err = pk.p.ReadFrom(r); err != nil { - return - } - - eddsa := make(ed25519.PublicKey, ed25519.PublicKeySize) - switch flag := pk.p.Bytes()[0]; flag { - case 0x04: - // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt - return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) - case 0x40: - copy(eddsa[:], pk.p.Bytes()[1:]) - default: - return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) - } - - pk.PublicKey = &eddsa - return -} - -// SerializeForHash serializes the PublicKey to w with the special packet -// header format needed for hashing. -func (pk *PublicKey) SerializeForHash(w io.Writer) error { - pk.SerializeSignaturePrefix(w) - return pk.serializeWithoutHeaders(w) -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { - var pLength = pk.algorithmSpecificByteCount() - if pk.Version == 5 { - pLength += 10 // version, timestamp (4), algorithm, key octet count (4). 
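// v5 keys hash a 0x9A marker followed by a four-octet length; the v4
// branch below uses 0x99 with a two-octet length (RFC 4880, section 5.2.4).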
- w.Write([]byte{ - 0x9A, - byte(pLength >> 24), - byte(pLength >> 16), - byte(pLength >> 8), - byte(pLength), - }) - return - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - length += pk.algorithmSpecificByteCount() - if pk.Version == 5 { - length += 4 // octet key count - } - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -func (pk *PublicKey) algorithmSpecificByteCount() int { - length := 0 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += int(pk.n.EncodedLength()) - length += int(pk.e.EncodedLength()) - case PubKeyAlgoDSA: - length += int(pk.p.EncodedLength()) - length += int(pk.q.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) - case PubKeyAlgoElGamal: - length += int(pk.p.EncodedLength()) - length += int(pk.g.EncodedLength()) - length += int(pk.y.EncodedLength()) - case PubKeyAlgoECDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - case PubKeyAlgoECDH: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - length += int(pk.kdf.EncodedLength()) - case PubKeyAlgoEdDSA: - length += int(pk.oid.EncodedLength()) - length += int(pk.p.EncodedLength()) - default: - panic("unknown public key algorithm") - } - return length -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - t := uint32(pk.CreationTime.Unix()) - if _, err = w.Write([]byte{ - byte(pk.Version), - byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), - byte(pk.PubKeyAlgo), - }); err != nil { - return - } - - if pk.Version == 5 { - n := pk.algorithmSpecificByteCount() - if _, err = w.Write([]byte{ - byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), - }); err != nil { - return - } - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - if _, err = w.Write(pk.n.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.e.EncodedBytes()) - return - case PubKeyAlgoDSA: - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.q.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.g.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.y.EncodedBytes()) - return - case PubKeyAlgoElGamal: - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.g.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.y.EncodedBytes()) - return - case PubKeyAlgoECDSA: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.p.EncodedBytes()) - return - case PubKeyAlgoECDH: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - if _, err = w.Write(pk.p.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.kdf.EncodedBytes()) - return - case PubKeyAlgoEdDSA: - if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { - return - } - _, err = w.Write(pk.p.EncodedBytes()) - return - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures 
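// (that is, its algorithm is not encryption-only: RSA encrypt-only,
// ElGamal and ECDH keys cannot sign)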
-func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.AddMetadataToHashSuffix() - } - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - case PubKeyAlgoEdDSA: - eddsaPublicKey := pk.PublicKey.(*ed25519.PublicKey) - - sigR := sig.EdDSASigR.Bytes() - sigS := sig.EdDSASigS.Bytes() - - eddsaSig := make([]byte, ed25519.SignatureSize) - copy(eddsaSig[32-len(sigR):32], sigR) - copy(eddsaSig[64-len(sigS):], sigS) - - if !ed25519.Verify(*eddsaPublicKey, hashBytes, eddsaSig) { - return errors.SignatureError("EdDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - if err != nil { - return nil, err - } - - err = signed.SerializeForHash(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. 
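- // Without this embedded back-signature, an attacker could claim
- // someone else's signing subkey as their own.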
- if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - err = pk.SerializeForHash(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. -func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, -// made by the passed in signingKey. -func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return signingKey.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// BitLength returns the bit length for the given public key. 
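-// For RSA keys this is the bit length of the modulus n; for the other
-// algorithms it is the bit length of the key's primary field (p).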
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.BitLength() - case PubKeyAlgoDSA: - bitLength = pk.p.BitLength() - case PubKeyAlgoElGamal: - bitLength = pk.p.BitLength() - case PubKeyAlgoECDSA: - bitLength = pk.p.BitLength() - case PubKeyAlgoECDH: - bitLength = pk.p.BitLength() - case PubKeyAlgoEdDSA: - bitLength = pk.p.BitLength() - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired or is created in the future. -func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { - if pk.CreationTime.After(currentTime) { - return true - } - if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { - return false - } - expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go deleted file mode 100644 index b255f1f6f8f..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go +++ /dev/null @@ -1,24 +0,0 @@ -package packet - -const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" - -const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" - -const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" - -const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" - -const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" - -const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" - -const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" - -const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" - -const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" - -const 
eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" - -// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key -const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go deleted file mode 100644 index 94ae9ad9aca..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. 
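- // Only errors.UnknownPacketTypeError is tolerated here: the unknown
- // packet is skipped and the loop reads the next one; any other error
- // is returned to the caller.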
- if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. -func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go deleted file mode 100644 index 6b1704e4292..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,964 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" - "github.com/ProtonMail/go-crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - Version int - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - - // Metadata includes format, filename and time, and is protected by v5 - // signatures of type 0x00 or 0x01. This metadata is included into the hash - // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. - Metadata *LiteralData - - CreationTime time.Time - - RSASignature encoding.Field - DSASigR, DSASigS encoding.Field - ECDSASigR, ECDSASigS encoding.Field - EdDSASigR, EdDSASigS encoding.Field - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - PreferredAEAD []uint8 - IssuerKeyId *uint64 - IssuerFingerprint []byte - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. 
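- // The first octet of the subpacket holds the machine-readable reason
- // code; the remaining octets are human-readable text, stored in
- // RevocationReasonText.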
- RevocationReason *uint8 - RevocationReasonText string - - // In a self-signature, these flags are set there is a features subpacket - // indicating that the issuer implementation supports these features - // (section 5.2.5.25). - MDC, AEAD, V5Keys bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. - EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 && buf[0] != 5 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - sig.Version = int(buf[0]) - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - hashedSubpackets := make([]byte, hashedSubpacketsLength) - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - sig.buildHashSuffix(hashedSubpackets) - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature = new(encoding.MPI) - _, err = sig.RSASignature.ReadFrom(r) - case PubKeyAlgoDSA: - sig.DSASigR = new(encoding.MPI) - if _, err = sig.DSASigR.ReadFrom(r); err != nil { - return - } - - sig.DSASigS = new(encoding.MPI) - _, err = sig.DSASigS.ReadFrom(r) - case PubKeyAlgoECDSA: - sig.ECDSASigR = new(encoding.MPI) - if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { - return - } - - sig.ECDSASigS = new(encoding.MPI) - _, err = sig.ECDSASigS.ReadFrom(r) - case PubKeyAlgoEdDSA: - sig.EdDSASigR = new(encoding.MPI) - if _, err = sig.EdDSASigR.ReadFrom(r); err != nil { - return - } - - sig.EdDSASigS = new(encoding.MPI) - if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { - return - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
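-// Each subpacket consists of a variable-length length field, a type octet
-// (whose high bit marks the subpacket critical), and a body. For example,
-// the two-octet length encoding {0xC3, 0x28} decodes as
-// (0xC3-192)<<8 + 0x28 + 192 = 1000. Unknown critical subpackets are
-// rejected with an UnsupportedError.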
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 - issuerFingerprintSubpacket signatureSubpacketType = 33 - prefAeadAlgosSubpacket signatureSubpacketType = 34 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, 
len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - if sig.Version > 4 { - err = errors.StructuralError("issuer subpacket found in v5 key") - } - // Issuer, section 5.2.3.5 - if len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. - if !isHashed { - return - } - if len(subpacket) > 0 { - if subpacket[0]&0x01 != 0 { - sig.MDC = true - } - if subpacket[0]&0x02 != 0 { - sig.AEAD = true - } - if subpacket[0]&0x04 != 0 { - sig.V5Keys = true - } - } - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. 
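- // The embedded signature is parsed like a top-level signature and
- // must be of type SigTypePrimaryKeyBinding (0x19), checked just below.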
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - case issuerFingerprintSubpacket: - v, l := subpacket[0], len(subpacket[1:]) - if v == 5 && l != 32 || v != 5 && l != 20 { - return nil, errors.StructuralError("bad fingerprint length") - } - sig.IssuerFingerprint = make([]byte, l) - copy(sig.IssuerFingerprint, subpacket[1:]) - sig.IssuerKeyId = new(uint64) - if v == 5 { - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) - } else { - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) - } - case prefAeadAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredAEAD = make([]byte, len(subpacket)) - copy(sig.PreferredAEAD, subpacket) - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { - if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { - return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) - } - return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. - if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// SigExpired returns whether sig is a signature that has expired or is created -// in the future. -func (sig *Signature) SigExpired(currentTime time.Time) bool { - if sig.CreationTime.After(currentTime) { - return true - } - if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. 
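-// The suffix consists of the version, signature type, public-key algorithm
-// and hash octets, the two-octet hashed-subpacket length and the hashed
-// subpackets themselves, followed by a 0x04/0x05 0xff trailer carrying the
-// byte count of that data (four length octets for v4, eight for v5).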
-func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { - hash, ok := s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - - hashedFields := bytes.NewBuffer([]byte{ - uint8(sig.Version), - uint8(sig.SigType), - uint8(sig.PubKeyAlgo), - uint8(hash), - uint8(len(hashedSubpackets) >> 8), - uint8(len(hashedSubpackets)), - }) - hashedFields.Write(hashedSubpackets) - - var l uint64 = uint64(6 + len(hashedSubpackets)) - if sig.Version == 5 { - hashedFields.Write([]byte{0x05, 0xff}) - hashedFields.Write([]byte{ - uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - } else { - hashedFields.Write([]byte{0x04, 0xff}) - hashedFields.Write([]byte{ - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - } - sig.HashSuffix = make([]byte, hashedFields.Len()) - copy(sig.HashSuffix, hashedFields.Bytes()) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - hashedSubpackets := make([]byte, hashedSubpacketsLen) - serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) - err = sig.buildHashSuffix(hashedSubpackets) - if err != nil { - return - } - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.AddMetadataToHashSuffix() - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - sig.Version = priv.PublicKey.Version - sig.IssuerFingerprint = priv.PublicKey.Fingerprint - sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) - if err != nil { - return err - } - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - sig.RSASignature = encoding.NewMPI(sigdata) - } - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR = new(encoding.MPI).SetBig(r) - sig.DSASigS = new(encoding.MPI).SetBig(s) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = new(encoding.MPI).SetBig(r) - sig.ECDSASigS = new(encoding.MPI).SetBig(s) - } - case PubKeyAlgoEdDSA: - sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, crypto.Hash(0)) - if err == nil { - sig.EdDSASigR = encoding.NewMPI(sigdata[:32]) - sig.EdDSASigS = encoding.NewMPI(sigdata[32:]) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, -// the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, - config *Config) error { - h, err := keySignatureHash(hashKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, signingKey, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - if priv.Dummy() { - return errors.ErrDummyPrivateKey("dummy key found") - } - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// RevokeKey computes a revocation signature of pub using priv. On success, the signature is -// stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keyRevocationHash(pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = int(sig.RSASignature.EncodedLength()) - case PubKeyAlgoDSA: - sigLength = int(sig.DSASigR.EncodedLength()) - sigLength += int(sig.DSASigS.EncodedLength()) - case PubKeyAlgoECDSA: - sigLength = int(sig.ECDSASigR.EncodedLength()) - sigLength += int(sig.ECDSASigS.EncodedLength()) - case PubKeyAlgoEdDSA: - sigLength = int(sig.EdDSASigR.EncodedLength()) - sigLength += int(sig.EdDSASigS.EncodedLength()) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - if sig.Version == 5 { - length -= 4 // eight-octet instead of four-octet big endian - } - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - err = sig.serializeBody(w) - if err != nil { - return err - } - return -} - -func (sig *Signature) serializeBody(w io.Writer) (err error) { - hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) - fields := sig.HashSuffix[:6+hashedSubpacketsLen] - _, err = w.Write(fields) - if err != nil { - return - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - _, err = w.Write(sig.RSASignature.EncodedBytes()) - case PubKeyAlgoDSA: - if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.DSASigS.EncodedBytes()) - case PubKeyAlgoECDSA: - if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.ECDSASigS.EncodedBytes()) - case PubKeyAlgoEdDSA: - if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { - return - } - _, err = w.Write(sig.EdDSASigS.EncodedBytes()) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. 
- subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil && sig.Version == 4 { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) - } - if sig.IssuerFingerprint != nil { - contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) - subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) - } - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. - - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures. - - var features = byte(0x00) - if sig.MDC { - features |= 0x01 - } - if sig.AEAD { - features |= 0x02 - } - if sig.V5Keys { - features |= 0x04 - } - - if features != 0x00 { - subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) - } - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - if len(sig.PreferredAEAD) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) - } - - // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. - if sig.RevocationReason != nil { - subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, - append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) - } - - // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
- if sig.EmbeddedSignature != nil { - var buf bytes.Buffer - err = sig.EmbeddedSignature.serializeBody(&buf) - if err != nil { - return - } - subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) - } - - return -} - -// AddMetadataToHashSuffix modifies the current hash suffix to include metadata -// (format, filename, and time). Version 5 keys protect this data including it -// in the hash computation. See section 5.2.4. -func (sig *Signature) AddMetadataToHashSuffix() { - if sig == nil || sig.Version != 5 { - return - } - if sig.SigType != 0x00 && sig.SigType != 0x01 { - return - } - lit := sig.Metadata - if lit == nil { - // This will translate into six 0x00 bytes. - lit = &LiteralData{} - } - - // Extract the current byte count - n := sig.HashSuffix[len(sig.HashSuffix)-8:] - l := uint64( - uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | - uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) - - suffix := bytes.NewBuffer(nil) - suffix.Write(sig.HashSuffix[:l]) - - // Add the metadata - var buf [4]byte - buf[0] = lit.Format - fileName := lit.FileName - if len(lit.FileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - suffix.Write(buf[:2]) - suffix.Write([]byte(lit.FileName)) - binary.BigEndian.PutUint32(buf[:], lit.Time) - suffix.Write(buf[:]) - - // Update the counter and restore trailing bytes - l = uint64(suffix.Len()) - suffix.Write([]byte{0x05, 0xff}) - suffix.Write([]byte{ - uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), - uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), - }) - sig.HashSuffix = suffix.Bytes() -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go deleted file mode 100644 index 0e9f51d51b3..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go +++ /dev/null @@ -1,267 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto/cipher" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/s2k" -) - -// This is the largest session key that we'll support. Since no 512-bit cipher -// has even been seriously used, this is comfortably large. -const maxSessionKeySizeInBytes = 64 - -// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC -// 4880, section 5.3. -type SymmetricKeyEncrypted struct { - Version int - CipherFunc CipherFunction - Mode AEADMode - s2k func(out, in []byte) - aeadNonce []byte - encryptedKey []byte -} - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. 
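- // Wire layout: version octet (4 or 5), cipher algorithm octet, an
- // AEAD mode octet for v5 only, the S2K specifier, and finally an
- // optional encrypted session key (preceded by the AEAD nonce in the
- // v5 case).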
- var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - ske.Version = int(buf[0]) - if ske.Version != 4 && ske.Version != 5 { - return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - if ske.Version == 5 { - mode := make([]byte, 1) - if _, err := r.Read(mode); err != nil { - return errors.StructuralError("cannot read AEAD octect from packet") - } - ske.Mode = AEADMode(mode[0]) - } - - var err error - if ske.s2k, err = s2k.Parse(r); err != nil { - if _, ok := err.(errors.ErrDummyPrivateKey); ok { - return errors.UnsupportedError("missing key GNU extension in session key") - } - return err - } - - if ske.Version == 5 { - // AEAD nonce - nonce := make([]byte, ske.Mode.NonceLength()) - _, err := readFull(r, nonce) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - ske.aeadNonce = nonce - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - switch ske.Version { - case 4: - plaintextKey, cipherFunc, err := ske.decryptV4(key) - return plaintextKey, cipherFunc, err - case 5: - plaintextKey, err := ske.decryptV5(key) - return plaintextKey, CipherFunction(0), err - } - err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") - return nil, CipherFunction(0), err -} - -func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError( - "unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if len(plaintextKey) != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError( - "length of decrypted key not equal to cipher keysize") - } - return plaintextKey, cipherFunc, nil -} - -func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { - blockCipher := CipherFunction(ske.CipherFunc).new(key) - aead := ske.Mode.new(blockCipher) - - adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} - plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata) - if err != nil { - return nil, err - } - return plaintextKey, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. 
-// The packet contains a random session key, encrypted by a key derived from -// the given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on -// whether config.AEADConfig != nil. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - - err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) - if err != nil { - return - } - - key = sessionKey - return -} - -// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. -// The packet contains the given session key, encrypted by a key derived from -// the given passphrase. The session key must be passed to -// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on -// whether config.AEADConfig != nil. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { - var version int - if config.AEAD() != nil { - version = 5 - } else { - version = 4 - } - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
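- // The same S2K descriptor is written into the packet, so the recipient
- // can re-derive keyEncryptingKey from the passphrase alone.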
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - var packetLength int - switch version { - case 4: - packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - case 5: - nonceLen := config.AEAD().Mode().NonceLength() - tagLen := config.AEAD().Mode().TagLength() - packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen - } - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - buf := make([]byte, 2) - // Symmetric Key Encrypted Version - buf[0] = byte(version) - // Cipher function - buf[1] = byte(cipherFunc) - - if version == 5 { - // AEAD mode - buf = append(buf, byte(config.AEAD().Mode())) - } - _, err = w.Write(buf) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - switch version { - case 4: - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - case 5: - blockCipher := cipherFunc.new(keyEncryptingKey) - mode := config.AEAD().Mode() - aead := mode.new(blockCipher) - // Sample nonce using random reader - nonce := make([]byte, config.AEAD().Mode().NonceLength()) - _, err = io.ReadFull(config.Random(), nonce) - if err != nil { - return - } - // Seal and write (encryptedData includes auth. tag) - adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} - encryptedData := aead.Seal(nil, nonce, sessionKey, adata) - _, err = w.Write(nonce) - if err != nil { - return - } - _, err = w.Write(encryptedData) - if err != nil { - return - } - } - - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 8b84de177f9..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - Contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.Contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted Contents of the -// packet can be read. 
An incorrect key will only be detected after trying -// to decrypt the entire data. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.Contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. - ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - - plaintext := cipher.StreamReader{S: s, R: se.Contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous Contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. 
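-// 0x80 (always-set bit) | 0x40 (new-format flag) | 19 (packet tag) = 0xd3.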
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.ErrMDCMissing - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.ErrMDCMissing - } - } - - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.ErrMDCHashMismatch - } - // The hash already includes the MDC header, but we still check its value - // to confirm encryption correctness - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.ErrMDCMissing - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. -type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - Contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go deleted file mode 100644 index 0f760ade2cf..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. - 0, 0, 0, 0, - 0, 0, 0, 0} - if _, err = buf.Write(data); err != nil { - return - } - if err = jpeg.Encode(&buf, photo, nil); err != nil { - return - } - uat.Contents = append(uat.Contents, &OpaqueSubpacket{ - SubType: UserAttrImageSubpacket, - Contents: buf.Bytes()}) - } - return -} - -// NewUserAttribute creates a new user attribute packet containing the given subpackets. -func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { - return &UserAttribute{Contents: contents} -} - -func (uat *UserAttribute) parse(r io.Reader) (err error) { - // RFC 4880, section 5.13 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uat.Contents, err = OpaqueSubpackets(b) - return -} - -// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including -// header. -func (uat *UserAttribute) Serialize(w io.Writer) (err error) { - var buf bytes.Buffer - for _, sp := range uat.Contents { - err = sp.Serialize(&buf) - if err != nil { - return err - } - } - if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { - return err - } - _, err = w.Write(buf.Bytes()) - return -} - -// ImageData returns zero or more byte slices, each containing -// JPEG File Interchange Format (JFIF), for each photo in the -// user attribute packet. -func (uat *UserAttribute) ImageData() (imageData [][]byte) { - for _, sp := range uat.Contents { - if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { - imageData = append(imageData, sp.Contents[16:]) - } - } - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go deleted file mode 100644 index d6bea7d4acc..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "io" - "io/ioutil" - "strings" -) - -// UserId contains text that is intended to represent the name and email -// address of the key holder. See RFC 4880, section 5.11. By convention, this -// takes the form "Full Name (Comment) " -type UserId struct { - Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below. 
- - Name, Comment, Email string -} - -func hasInvalidCharacters(s string) bool { - for _, c := range s { - switch c { - case '(', ')', '<', '>', 0: - return true - } - } - return false -} - -// NewUserId returns a UserId or nil if any of the arguments contain invalid -// characters. The invalid characters are '\x00', '(', ')', '<' and '>' -func NewUserId(name, comment, email string) *UserId { - // RFC 4880 doesn't deal with the structure of userid strings; the - // name, comment and email form is just a convention. However, there's - // no convention about escaping the metacharacters and GPG just refuses - // to create user ids where, say, the name contains a '('. We mirror - // this behaviour. - - if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { - return nil - } - - uid := new(UserId) - uid.Name, uid.Comment, uid.Email = name, comment, email - uid.Id = name - if len(comment) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "(" - uid.Id += comment - uid.Id += ")" - } - if len(email) > 0 { - if len(uid.Id) > 0 { - uid.Id += " " - } - uid.Id += "<" - uid.Id += email - uid.Id += ">" - } - return uid -} - -func (uid *UserId) parse(r io.Reader) (err error) { - // RFC 4880, section 5.11 - b, err := ioutil.ReadAll(r) - if err != nil { - return - } - uid.Id = string(b) - uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) - return -} - -// Serialize marshals uid to w in the form of an OpenPGP packet, including -// header. -func (uid *UserId) Serialize(w io.Writer) error { - err := serializeHeader(w, packetTypeUserId, len(uid.Id)) - if err != nil { - return err - } - _, err = w.Write([]byte(uid.Id)) - return err -} - -// parseUserId extracts the name, comment and email from a user id string that -// is formatted as "Full Name (Comment) ". -func parseUserId(id string) (name, comment, email string) { - var n, c, e struct { - start, end int - } - var state int - - for offset, rune := range id { - switch state { - case 0: - // Entering name - n.start = offset - state = 1 - fallthrough - case 1: - // In name - if rune == '(' { - state = 2 - n.end = offset - } else if rune == '<' { - state = 5 - n.end = offset - } - case 2: - // Entering comment - c.start = offset - state = 3 - fallthrough - case 3: - // In comment - if rune == ')' { - state = 4 - c.end = offset - } - case 4: - // Between comment and email - if rune == '<' { - state = 5 - } - case 5: - // Entering email - e.start = offset - state = 6 - fallthrough - case 6: - // In email - if rune == '>' { - state = 7 - e.end = offset - } - default: - // After email - } - } - switch state { - case 1: - // ended in the name - n.end = len(id) - case 3: - // ended in comment - c.end = len(id) - case 6: - // ended in email - e.end = len(id) - } - - name = strings.TrimSpace(id[n.start:n.end]) - comment = strings.TrimSpace(id[c.start:c.end]) - email = strings.TrimSpace(id[e.start:e.end]) - return -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go deleted file mode 100644 index a649ebbab39..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go +++ /dev/null @@ -1,508 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package openpgp implements high level operations on OpenPGP messages. 
-package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" - -import ( - "crypto" - _ "crypto/sha256" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/armor" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/packet" -) - -// SignatureType is the armor type for a PGP signature. -var SignatureType = "PGP SIGNATURE" - -// readArmored reads an armored block with the given type. -func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { - block, err := armor.Decode(r) - if err != nil { - return - } - - if block.Type != expectedType { - return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) - } - - return block.Body, nil -} - -// MessageDetails contains the result of parsing an OpenPGP encrypted and/or -// signed message. -type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - Signature *packet.Signature // the signature packet itself. - SignatureError error // nil if the signature is good. - UnverifiedSignatures []*packet.Signature // all other unverified signature packets. - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. 
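Since ReadMessage, defined just below, is the main entry point for consumers of this package, a short usage sketch may help; it assumes an armored input and an already-loaded keyring, and the helper name is illustrative, not part of the API.

package main

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"
	"github.com/ProtonMail/go-crypto/openpgp/armor"
)

// decryptAndVerify is a hypothetical helper. It reads an armored message,
// decrypts it against the keyring, and returns the plaintext only after
// the whole body has been consumed, because the MDC and signature checks
// described above run at EOF of UnverifiedBody.
func decryptAndVerify(armored io.Reader, keyring openpgp.KeyRing) ([]byte, error) {
	block, err := armor.Decode(armored)
	if err != nil {
		return nil, err
	}
	md, err := openpgp.ReadMessage(block.Body, keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	plaintext, err := io.ReadAll(md.UnverifiedBody)
	if err != nil {
		return nil, err
	}
	if md.IsSigned && md.SignatureError != nil {
		return nil, md.SignatureError
	}
	return plaintext, nil
}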
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted - var edp packet.EncryptedDataPacket - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. - md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: - edp = p.(packet.EncryptedDataPacket) - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring, config) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. 
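For reference, a callback matching the PromptFunction type documented earlier might look like the following hedged sketch; it offers a fixed passphrase exactly once and then errors out, since the loop that follows will otherwise keep calling the prompt forever.

package main

import (
	"errors"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// fixedPassphrase builds a hypothetical PromptFunction that hands back one
// passphrase for symmetrically encrypted messages, then gives up.
func fixedPassphrase(passphrase []byte) openpgp.PromptFunction {
	used := false
	return func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
		if used || !symmetric {
			return nil, errors.New("no passphrase available")
		}
		used = true
		return passphrase, nil
	}
}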
-FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - if errDec != nil { - continue - } - } - // Try to decrypt symmetrically encrypted - decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - // On wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc: - // only for < 5% of cases we will proceed to decrypt the data - if err == nil { - decrypted, err = edp.Decrypt(cipherFunc, key) - // TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption, - // but it might still be relevant for when we implement AEAD decryption (otherwise, remove?) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config) - if sensitiveParsingErr != nil { - return nil, errors.StructuralError("parsing error") - } - return mdFinal, nil -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash - var prevLast bool -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if prevLast { - return nil, errors.UnsupportedError("nested signature packets") - } - - if p.IsLast { - prevLast = true - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md.SignatureError = err - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.IsSigned && md.SignatureError == nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if hashId == crypto.MD5 { - return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5") - } - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (int, error) { - n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) - if sensitiveParsingError == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr - } - return n, io.EOF - } - - if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") - } - - return n, nil -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails - config *packet.Config -} - -func (scr *signatureCheckReader) Read(buf []byte) (int, error) { - n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) - - // Hash only if required - if scr.md.SignedBy != nil { - scr.wrappedHash.Write(buf[:n]) - } - - if sensitiveParsingError == io.EOF { - var p packet.Packet - var readError error - var sig *packet.Signature - - p, readError = scr.packets.Next() - for readError == nil { - var ok bool - if sig, ok = p.(*packet.Signature); ok { - if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { - sig.Metadata = scr.md.LiteralData - } - - // If signature KeyID matches - if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { - scr.md.Signature = sig - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { - scr.md.SignatureError = errors.ErrSignatureExpired - } - } else { - scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) - } - } - - p, readError = scr.packets.Next() - } - - if scr.md.SignedBy != nil && scr.md.Signature == nil { - if scr.md.UnverifiedSignatures == nil { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") - } else { - scr.md.SignatureError = errors.StructuralError("No matching signature found") - } - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - return n, mdcErr - } - } - return n, io.EOF - } - - if sensitiveParsingError != nil { - return n, errors.StructuralError("parsing error") - } - - return n, nil -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - var expectedHashes []crypto.Hash - return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) -} - -// CheckDetachedSignatureAndHash performs the same actions as -// CheckDetachedSignature and checks that the expected hash functions were used. 
-func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - expectedHashesLen := len(expectedHashes) - packets := packet.NewReader(signature) - var sig *packet.Signature - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - var ok bool - sig, ok = p.(*packet.Signature) - if !ok { - return nil, errors.StructuralError("non signature packet found") - } - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - - for i, expectedHash := range expectedHashes { - if hashFunc == expectedHash { - break - } - if i+1 == expectedHashesLen { - return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") - } - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - err = key.PublicKey.VerifySignature(h, sig) - if err == nil { - now := config.Now() - if sig.SigExpired(now) { - return key.Entity, errors.ErrSignatureExpired - } - if key.PublicKey.KeyExpired(key.SelfSignature, now) { - return key.Entity, errors.ErrKeyExpired - } - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. 
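Seen from the caller's side, the detached-signature helpers above compose as in this sketch; the keyring and file paths are placeholders, not values from this repository.

package main

import (
	"os"

	"github.com/ProtonMail/go-crypto/openpgp"
)

// verifyDetached checks dataPath against an armored detached signature at
// sigPath and reports the verification error, if any.
func verifyDetached(keyring openpgp.KeyRing, dataPath, sigPath string) error {
	data, err := os.Open(dataPath)
	if err != nil {
		return err
	}
	defer data.Close()
	sig, err := os.Open(sigPath)
	if err != nil {
		return err
	}
	defer sig.Close()
	// A nil config selects the sensible defaults mentioned above.
	_, err = openpgp.CheckArmoredDetachedSignature(keyring, data, sig, nil)
	return err
}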
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body, config) -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go deleted file mode 100644 index 8caed36e3e7..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go +++ /dev/null @@ -1,173 +0,0 @@ -package openpgp - -const testKey1KeyId = 0xA34D7E18C20C31BB -const testKey3KeyId = 0x338934250CCC0360 -const testKeyP256KeyId = 0xd44a2c495918513e - -const signedInput = "Signed message\nline 2\nline 3\n" -const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" - -const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" - -const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" - -const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" - -const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" - -const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" - -// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt -const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" - -const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" - -const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" - -const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" - -const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" - -const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" - -const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" - -const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" - -const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" - -const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" - -const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" - -const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" - -const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" - -const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" - -const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" - -const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- -Version: GnuPG v1.4.10 (GNU/Linux) - -lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp -idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
-vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB -AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X -0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL -IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk -VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn -gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 -TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx -q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz -dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA -CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 -ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ -eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid -AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV -bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK -/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA -A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX -TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc -lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 -rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN -oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 -QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU -nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC -AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp -BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad -AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL -VrM0m72/jnpKo04= -=zNCn ------END PGP PRIVATE KEY BLOCK-----` - -const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Charset: UTF-8 - -xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 -sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk -Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ -AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD -24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX -+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 -B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX -fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA -FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 -ex7En5r7rHR5xwX82Msc+Rq9dSyO -=7MrZ ------END PGP PUBLIC KEY BLOCK-----` - -const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` - -const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` - -const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` - -const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101` - -const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` - -const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- -Comment: GPGTools - https://gpgtools.org - -mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY -BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z -tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 -JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV -/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ -K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H -JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx 
-YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1 -b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi -UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M -pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM -AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz -786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd -EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB -=RZia ------END PGP PUBLIC KEY BLOCK----- -` - -const signedMessageV3 = `-----BEGIN PGP MESSAGE----- -Comment: GPGTools - https://gpgtools.org - -owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP -q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka -uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka -DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d -iT57d/OhWwA= -=hG7R ------END PGP MESSAGE----- -` - -// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/ -const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- - -lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd -fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA -Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC -X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI -CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9 -M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA -MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD -AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF -GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb -DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7 -TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw== -=IiS2 ------END PGP PRIVATE KEY BLOCK-----` - -// Generated with the above private key -const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE----- -Version: OpenPGP.js v4.10.7 -Comment: https://openpgpjs.org - -xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf -bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G -y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv -UQdl5MlBka1QSNbMq2Bz7XwNPg4= -=6lbM ------END PGP MESSAGE-----` diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 14f58548bd2..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,367 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package s2k implements the various OpenPGP string-to-key transforms as -// specified in RFC 4800 section 3.7.1. -package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k" - -import ( - "crypto" - "hash" - "io" - "strconv" - - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" -) - -// Config collects configuration parameters for s2k key-stretching -// transformations. A nil *Config is valid and results in all default -// values. Currently, Config is used only by the Serialize function in -// this package. -type Config struct { - // S2KMode is the mode of s2k function. - // It can be 0 (simple), 1(salted), 3(iterated) - // 2(reserved) 100-110(private/experimental). - S2KMode uint8 - // Hash is the default hash function to be used. If - // nil, SHA256 is used. 
- Hash crypto.Hash - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 65536 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 16777216 used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. See RFC 4880 Section 3.7.1.3. - S2KCount int -} - -// Params contains all the parameters of the s2k packet -type Params struct { - // mode is the mode of s2k function. - // It can be 0 (simple), 1(salted), 3(iterated) - // 2(reserved) 100-110(private/experimental). - mode uint8 - // hashId is the ID of the hash function used in any of the modes - hashId byte - // salt is a byte array to use as a salt in hashing process - salt []byte - // countByte is used to determine how many rounds of hashing are to - // be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3. - countByte byte -} - -func (c *Config) hash() crypto.Hash { - if c == nil || uint(c.Hash) == 0 { - return crypto.SHA256 - } - - return c.Hash -} - -// EncodedCount get encoded count -func (c *Config) EncodedCount() uint8 { - if c == nil || c.S2KCount == 0 { - return 224 // The common case. Corresponding to 16777216 - } - - i := c.S2KCount - - switch { - case i < 65536: - i = 65536 - case i > 65011712: - i = 65011712 - } - - return encodeCount(i) -} - -// encodeCount converts an iterative "count" in the range 1024 to -// 65011712, inclusive, to an encoded count. The return value is the -// octet that is actually stored in the GPG file. encodeCount panics -// if i is not in the above range (encodedCount above takes care to -// pass i in the correct range). See RFC 4880 Section 3.7.7.1. -func encodeCount(i int) uint8 { - if i < 65536 || i > 65011712 { - panic("count arg i outside the required range") - } - - for encoded := 96; encoded < 256; encoded++ { - count := decodeCount(uint8(encoded)) - if count >= i { - return uint8(encoded) - } - } - - return 255 -} - -// decodeCount returns the s2k mode 3 iterative "count" corresponding to -// the encoded octet c. -func decodeCount(c uint8) int { - return (16 + int(c&15)) << (uint32(c>>4) + 6) -} - -// Simple writes to out the result of computing the Simple S2K function (RFC -// 4880, section 3.7.1.1) using the given hash and input passphrase. -func Simple(out []byte, h hash.Hash, in []byte) { - Salted(out, h, in, nil) -} - -var zero [1]byte - -// Salted writes to out the result of computing the Salted S2K function (RFC -// 4880, section 3.7.1.2) using the given hash, input passphrase and salt. -func Salted(out []byte, h hash.Hash, in []byte, salt []byte) { - done := 0 - var digest []byte - - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - h.Write(salt) - h.Write(in) - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Iterated writes to out the result of computing the Iterated and Salted S2K -// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase, -// salt and iteration count. 
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Generate generates valid parameters from given configuration. -// It will enforce salted + hashed s2k method -func Generate(rand io.Reader, c *Config) (*Params, error) { - hashId, ok := HashToHashId(c.Hash) - if !ok { - return nil, errors.UnsupportedError("no such hash") - } - - params := &Params{ - mode: 3, // Enforce iterared + salted method - hashId: hashId, - salt: make([]byte, 8), - countByte: c.EncodedCount(), - } - - if _, err := io.ReadFull(rand, params.salt); err != nil { - return nil, err - } - - return params, nil -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. If the S2K is a special -// GNU extension that indicates that the private key is missing, then the error -// returned is errors.ErrDummyPrivateKey. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - params, err := ParseIntoParams(r) - if err != nil { - return nil, err - } - - return params.Function() -} - -// ParseIntoParams reads a binary specification for a string-to-key -// transformation from r and returns a struct describing the s2k parameters. -func ParseIntoParams(r io.Reader) (params *Params, err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - params = &Params{ - mode: buf[0], - hashId: buf[1], - } - - switch params.mode { - case 0: - return params, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return nil, err - } - - params.salt = buf[:8] - return params, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return nil, err - } - - params.salt = buf[:8] - params.countByte = buf[8] - return params, nil - case 101: - // This is a GNU extension. 
See - // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 - if _, err = io.ReadFull(r, buf[:4]); err != nil { - return nil, err - } - if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 { - return params, nil - } - return nil, errors.UnsupportedError("GNU S2K extension") - } - - return nil, errors.UnsupportedError("S2K function") -} - -func (params *Params) Dummy() bool { - return params != nil && params.mode == 101 -} - -func (params *Params) Function() (f func(out, in []byte), err error) { - if params.Dummy() { - return nil, errors.ErrDummyPrivateKey("dummy key found") - } - hashObj, ok := HashIdToHash(params.hashId) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId))) - } - if !hashObj.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj))) - } - - switch params.mode { - case 0: - f := func(out, in []byte) { - Simple(out, hashObj.New(), in) - } - - return f, nil - case 1: - f := func(out, in []byte) { - Salted(out, hashObj.New(), in, params.salt) - } - - return f, nil - case 3: - f := func(out, in []byte) { - Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte)) - } - - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -func (params *Params) Serialize(w io.Writer) (err error) { - if _, err = w.Write([]byte{params.mode}); err != nil { - return - } - if _, err = w.Write([]byte{params.hashId}); err != nil { - return - } - if params.Dummy() { - _, err = w.Write(append([]byte("GNU"), 1)) - return - } - if params.mode > 0 { - if _, err = w.Write(params.salt); err != nil { - return - } - if params.mode == 3 { - _, err = w.Write([]byte{params.countByte}) - } - } - return -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - params, err := Generate(rand, c) - if err != nil { - return err - } - err = params.Serialize(w) - if err != nil { - return err - } - - f, err := params.Function() - if err != nil { - return err - } - f(key, passphrase) - return nil -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - if hash, ok := algorithm.HashById[id]; ok { - return hash.HashFunc(), true - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. -func HashIdToString(id byte) (name string, ok bool) { - if hash, ok := algorithm.HashById[id]; ok { - return hash.String(), true - } - return "", false -} - -// HashIdToHash returns an OpenPGP hash id which corresponds the given Hash. 
-func HashToHashId(h crypto.Hash) (id byte, ok bool) { - for id, hash := range algorithm.HashById { - if hash.HashFunc() == h { - return id, true - } - } - return 0, false -} diff --git a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go deleted file mode 100644 index bb74b13872a..00000000000 --- a/ui/wasm/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go +++ /dev/null @@ -1,559 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto" - "hash" - "io" - "strconv" - "time" - - "github.com/ProtonMail/go-crypto/openpgp/armor" - "github.com/ProtonMail/go-crypto/openpgp/errors" - "github.com/ProtonMail/go-crypto/openpgp/packet" - "github.com/ProtonMail/go-crypto/openpgp/s2k" -) - -// DetachSign signs message with the private key from signer (which must -// already have been decrypted) and writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// ArmoredDetachSign signs message with the private key from signer (which -// must already have been decrypted) and writes an armored signature to w. -// If config is nil, sensible defaults will be used. -func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { - return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) -} - -// DetachSignText signs message (after canonicalising the line endings) with -// the private key from signer (which must already have been decrypted) and -// writes the signature to w. -// If config is nil, sensible defaults will be used. -func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return detachSign(w, signer, message, packet.SigTypeText, config) -} - -// ArmoredDetachSignText signs message (after canonicalising the line endings) -// with the private key from signer (which must already have been decrypted) -// and writes an armored signature to w. -// If config is nil, sensible defaults will be used. 
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) - if !ok { - return errors.InvalidArgumentError("no valid signing keys") - } - if signingKey.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signingKey.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sigLifetimeSecs := config.SigLifetime() - sig.SigLifetimeSecs = &sigLifetimeSecs - sig.IssuerKeyId = &signingKey.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - if _, err = io.Copy(wrappedHash, message); err != nil { - return err - } - - err = sig.Sign(h, signingKey.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. 
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - - var w io.WriteCloser - if config.AEAD() != nil { - w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config) - if err != nil { - return - } - } else { - w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - } - - literalData := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literalData, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. -func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// EncryptText encrypts a message to a number of recipients and, optionally, -// signs it. Optional information is contained in 'hints', also encrypted, that -// aids the recipients in processing the message. The resulting WriteCloser -// must be closed after the contents of the file have been written. If config -// is nil, sensible defaults will be used. The signing is done in text mode. -func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) -} - -// EncryptSplit encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
-func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) -} - -// writeAndSign writes the data as a payload package and, optionally, signs -// it. hints contains optional information, that is also encrypted, -// that aids the recipients in processing the message. The resulting -// WriteCloser must be closed after the contents of the file have been -// written. If config is nil, sensible defaults will be used. -func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. - if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: sigType, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(payload); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := payload - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{w} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - h, wrappedHash, err := hashForSignature(hash, sigType) - if err != nil { - return nil, err - } - metadata := &packet.LiteralData{ - Format: 't', - FileName: hints.FileName, - Time: epochSeconds, - } - if hints.IsBinary { - metadata.Format = 'b' - } - return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil - } - return literalData, nil -} - -// encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. 
-func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { - if len(to) == 0 { - return nil, errors.InvalidArgumentError("no encryption recipient provided") - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - candidateAeadModes := []uint8{ - uint8(packet.AEADModeEAX), - uint8(packet.AEADModeOCB), - uint8(packet.AEADModeExperimentalGCM), - } - candidateCompression := []uint8{ - uint8(packet.CompressionNone), - uint8(packet.CompressionZIP), - uint8(packet.CompressionZLIB), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[0:1] - defaultHashes := candidateHashes[0:1] - defaultAeadModes := candidateAeadModes[0:1] - defaultCompression := candidateCompression[0:1] - - encryptKeys := make([]Key, len(to)) - // AEAD is used only if every key supports it. - aeadSupported := true - - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].PrimaryIdentity().SelfSignature - if sig.AEAD == false { - aeadSupported = false - } - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - preferredAeadModes := sig.PreferredAEAD - if len(preferredAeadModes) == 0 { - preferredAeadModes = defaultAeadModes - } - preferredCompression := sig.PreferredCompression - if len(preferredCompression) == 0 { - preferredCompression = defaultCompression - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes) - candidateCompression = intersectPreferences(candidateCompression, preferredCompression) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - mode := packet.AEADMode(candidateAeadModes[0]) - // If the cipher specified by config is a candidate, we'll use that. 
- configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - var payload io.WriteCloser - if aeadSupported { - payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) - if err != nil { - return - } - } else { - payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config) - if err != nil { - return - } - } - payload, err = handleCompression(payload, candidateCompression, config) - if err != nil { - return nil, err - } - - return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) -} - -// Sign signs a message. The resulting WriteCloser must be closed after the -// contents of the file have been written. hints contains optional information -// that aids the recipients in processing the message. -// If config is nil, sensible defaults will be used. -func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { - if signed == nil { - return nil, errors.InvalidArgumentError("no signer provided") - } - - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA384), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - defaultHashes := candidateHashes[0:1] - preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - if len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") - } - - return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. 
-type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - wrappedHash hash.Hash - h hash.Hash - signer *packet.PrivateKey - sigType packet.SignatureType - config *packet.Config - metadata *packet.LiteralData // V5 signatures protect document metadata -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.wrappedHash.Write(data) - switch s.sigType { - case packet.SigTypeBinary: - return s.literalData.Write(data) - case packet.SigTypeText: - flag := 0 - return writeCanonical(s.literalData, data, &flag) - } - return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - Version: s.signer.Version, - SigType: s.sigType, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - Metadata: s.metadata, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { - data = compressed - confAlgo := config.Compression() - if confAlgo == packet.CompressionNone { - return - } - finalAlgo := packet.CompressionNone - // if compression specified by config available we will use it - for _, c := range candidateCompression { - if uint8(confAlgo) == c { - finalAlgo = confAlgo - break - } - } - - if finalAlgo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) - if err != nil { - return - } - } - return data, nil -} diff --git a/ui/wasm/vendor/github.com/acomagu/bufpipe/README.md b/ui/wasm/vendor/github.com/acomagu/bufpipe/README.md deleted file mode 100644 index 19df083142d..00000000000 --- a/ui/wasm/vendor/github.com/acomagu/bufpipe/README.md +++ /dev/null @@ -1,42 +0,0 @@ -# bufpipe: Buffered Pipe - -[![CircleCI](https://img.shields.io/circleci/build/github/acomagu/bufpipe.svg?style=flat-square)](https://circleci.com/gh/acomagu/bufpipe) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/acomagu/bufpipe) - -The buffered version of io.Pipe. It's safe for concurrent use. - -## How does it differ from io.Pipe? - -Writes never block because the pipe has variable-sized buffer. - -```Go -r, w := bufpipe.New(nil) -io.WriteString(w, "abc") // No blocking. -io.WriteString(w, "def") // No blocking, too. -w.Close() -io.Copy(os.Stdout, r) -// Output: abcdef -``` - -[Playground](https://play.golang.org/p/PdyBAS3pVob) - -## How does it differ from bytes.Buffer? - -Reads block if the internal buffer is empty until the writer is closed. 
- -```Go -r, w := bufpipe.New(nil) - -done := make(chan struct{}) -go func() { - io.Copy(os.Stdout, r) // The reads block until the writer is closed. - done <- struct{}{} -}() - -io.WriteString(w, "abc") -io.WriteString(w, "def") -w.Close() -<-done -// Output: abcdef -``` - -[Playground](https://play.golang.org/p/UppmyLeRgX6) diff --git a/ui/wasm/vendor/github.com/acomagu/bufpipe/bufpipe.go b/ui/wasm/vendor/github.com/acomagu/bufpipe/bufpipe.go deleted file mode 100644 index 846dbcc290f..00000000000 --- a/ui/wasm/vendor/github.com/acomagu/bufpipe/bufpipe.go +++ /dev/null @@ -1,128 +0,0 @@ -package bufpipe - -import ( - "bytes" - "errors" - "io" - "sync" -) - -// ErrClosedPipe is the error used for read or write operations on a closed pipe. -var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe") - -type pipe struct { - cond *sync.Cond - buf *bytes.Buffer - rerr, werr error -} - -// A PipeReader is the read half of a pipe. -type PipeReader struct { - *pipe -} - -// A PipeWriter is the write half of a pipe. -type PipeWriter struct { - *pipe -} - -// New creates a synchronous pipe using buf as its initial contents. It can be -// used to connect code expecting an io.Reader with code expecting an io.Writer. -// -// Unlike io.Pipe, writes never block because the internal buffer has variable -// size. Reads block only when the buffer is empty. -// -// It is safe to call Read and Write in parallel with each other or with Close. -// Parallel calls to Read and parallel calls to Write are also safe: the -// individual calls will be gated sequentially. -// -// The new pipe takes ownership of buf, and the caller should not use buf after -// this call. New is intended to prepare a PipeReader to read existing data. It -// can also be used to set the initial size of the internal buffer for writing. -// To do that, buf should have the desired capacity but a length of zero. -func New(buf []byte) (*PipeReader, *PipeWriter) { - p := &pipe{ - buf: bytes.NewBuffer(buf), - cond: sync.NewCond(new(sync.Mutex)), - } - return &PipeReader{ - pipe: p, - }, &PipeWriter{ - pipe: p, - } -} - -// Read implements the standard Read interface: it reads data from the pipe, -// reading from the internal buffer, otherwise blocking until a writer arrives -// or the write end is closed. If the write end is closed with an error, that -// error is returned as err; otherwise err is io.EOF. -func (r *PipeReader) Read(data []byte) (int, error) { - r.cond.L.Lock() - defer r.cond.L.Unlock() - -RETRY: - n, err := r.buf.Read(data) - // If not closed and no read, wait for writing. - if err == io.EOF && r.rerr == nil && n == 0 { - r.cond.Wait() - goto RETRY - } - if err == io.EOF { - return n, r.rerr - } - return n, err -} - -// Close closes the reader; subsequent writes from the write half of the pipe -// will return error ErrClosedPipe. -func (r *PipeReader) Close() error { - return r.CloseWithError(nil) -} - -// CloseWithError closes the reader; subsequent writes to the write half of the -// pipe will return the error err. -func (r *PipeReader) CloseWithError(err error) error { - r.cond.L.Lock() - defer r.cond.L.Unlock() - - if err == nil { - err = ErrClosedPipe - } - r.werr = err - return nil -} - -// Write implements the standard Write interface: it writes data to the internal -// buffer. If the read end is closed with an error, that err is returned as err; -// otherwise err is ErrClosedPipe. 
-func (w *PipeWriter) Write(data []byte) (int, error) { - w.cond.L.Lock() - defer w.cond.L.Unlock() - - if w.werr != nil { - return 0, w.werr - } - - n, err := w.buf.Write(data) - w.cond.Signal() - return n, err -} - -// Close closes the writer; subsequent reads from the read half of the pipe will -// return io.EOF once the internal buffer get empty. -func (w *PipeWriter) Close() error { - return w.CloseWithError(nil) -} - -// Close closes the writer; subsequent reads from the read half of the pipe will -// return err once the internal buffer get empty. -func (w *PipeWriter) CloseWithError(err error) error { - w.cond.L.Lock() - defer w.cond.L.Unlock() - - if err == nil { - err = io.EOF - } - w.rerr = err - return nil -} diff --git a/ui/wasm/vendor/github.com/acomagu/bufpipe/doc.go b/ui/wasm/vendor/github.com/acomagu/bufpipe/doc.go deleted file mode 100644 index 16a39480017..00000000000 --- a/ui/wasm/vendor/github.com/acomagu/bufpipe/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package bufpipe provides a IO pipe, has variable-sized buffer. -package bufpipe diff --git a/ui/wasm/vendor/github.com/beorn7/perks/LICENSE b/ui/wasm/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be663..00000000000 --- a/ui/wasm/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
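For readers skimming this removal, the bufpipe semantics are easiest to see end to end. This is a minimal, self-contained sketch of the README example from the deleted files, assuming the import path `github.com/acomagu/bufpipe` exactly as vendored above; it is an illustration of the removed package's behavior, not part of this change.

```go
package main

import (
	"io"
	"os"

	"github.com/acomagu/bufpipe" // vendored import path as in the diff above
)

func main() {
	r, w := bufpipe.New(nil) // nil initial contents; the buffer grows on demand

	done := make(chan struct{})
	go func() {
		io.Copy(os.Stdout, r) // reads block only while the buffer is empty
		close(done)
	}()

	io.WriteString(w, "abc") // never blocks: a write only grows the buffer
	io.WriteString(w, "def")
	w.Close() // reader drains "abcdef", then sees io.EOF
	<-done
	// Output: abcdef
}
```

Per the deleted `bufpipe.go`, only the read side ever blocks, and only while the internal `bytes.Buffer` is empty and the write half is still open; this is what distinguishes it from both `io.Pipe` (blocking writes) and a bare `bytes.Buffer` (non-blocking, non-synchronized reads).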
diff --git a/ui/wasm/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/ui/wasm/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7ce..00000000000 --- a/ui/wasm/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 
-4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 
-3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/ui/wasm/vendor/github.com/beorn7/perks/quantile/stream.go b/ui/wasm/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8eb63..00000000000 --- a/ui/wasm/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). 
-// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. 
-func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/.travis.yml b/ui/wasm/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea88da7..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... 
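The `quantile.Stream` deleted above is driven through `NewTargeted`/`Insert`/`Query`. A minimal sketch of a typical caller, assuming the vendored import path `github.com/beorn7/perks/quantile` from the diff above; again an illustration of the removed API, not part of this change.

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile" // vendored import path as in the diff above
)

func main() {
	// Targets are supplied a priori: desired quantile -> tolerated absolute error.
	// Knowing them up front lets the stream keep its summary small.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})

	for i := 0; i < 100000; i++ {
		q.Insert(rand.Float64()) // memory stays bounded by the targeted errors, not the stream length
	}

	// Per the package docs, Query is only well defined for quantiles
	// that were passed to NewTargeted.
	fmt.Printf("p50~%.3f p99~%.3f n=%d\n", q.Query(0.50), q.Query(0.99), q.Count())
}
```

As the deleted package comment notes, accuracy is traded for the low memory and CPU bounds, and `Merge` is documented there as broken, so results should be treated as approximations from a single stream.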
diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/ui/wasm/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b53065f40..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/README.md b/ui/wasm/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693c21b..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. 
- -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash.go b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35fbe39..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. 
-func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b807f4d..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32aed4..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821603e..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7a31f..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76efbc2..00000000000 --- a/ui/wasm/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. 
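Taken together, the xxhash files deleted here expose a small surface: one-shot `Sum64`/`Sum64String` and the streaming `Digest`. A minimal usage sketch of that deleted vendored API (illustrative only, not part of this diff's changes):

```go
package main

import (
    "fmt"

    "github.com/cespare/xxhash/v2"
)

func main() {
    // One-shot hash of a byte slice.
    fmt.Println(xxhash.Sum64([]byte("hello")))

    // Streaming: Digest implements hash.Hash64, so data can arrive in chunks.
    d := xxhash.New()
    d.WriteString("hello, ")
    d.WriteString("world")
    fmt.Println(d.Sum64())
}
```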
-func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/LICENSE b/ui/wasm/vendor/github.com/containerd/containerd/LICENSE deleted file mode 100644 index 584149b6ee2..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/containerd/containerd/NOTICE b/ui/wasm/vendor/github.com/containerd/containerd/NOTICE deleted file mode 100644 index 8915f02773f..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/NOTICE +++ /dev/null @@ -1,16 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/ui/wasm/vendor/github.com/containerd/containerd/archive/compression/compression.go b/ui/wasm/vendor/github.com/containerd/containerd/archive/compression/compression.go deleted file mode 100644 index a883e4d58ce..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package compression - -import ( - "bufio" - "bytes" - "compress/gzip" - "context" - "fmt" - "io" - "os" - "os/exec" - "strconv" - "sync" - - "github.com/containerd/containerd/log" - "github.com/klauspost/compress/zstd" -) - -type ( - // Compression is the state represents if compressed or not. - Compression int -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Gzip is gzip compression algorithm. - Gzip - // Zstd is zstd compression algorithm. - Zstd -) - -const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" - -var ( - initPigz sync.Once - unpigzPath string -) - -var ( - bufioReader32KPool = &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, - } -) - -// DecompressReadCloser include the stream after decompress and the compress method detected. 
-type DecompressReadCloser interface { - io.ReadCloser - // GetCompression returns the compress method which is used before decompressing - GetCompression() Compression -} - -type readCloserWrapper struct { - io.Reader - compression Compression - closer func() error -} - -func (r *readCloserWrapper) Close() error { - if r.closer != nil { - return r.closer() - } - return nil -} - -func (r *readCloserWrapper) GetCompression() Compression { - return r.compression -} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (w *writeCloserWrapper) Close() error { - if w.closer != nil { - w.closer() - } - return nil -} - -type bufferedReader struct { - buf *bufio.Reader -} - -func newBufferedReader(r io.Reader) *bufferedReader { - buf := bufioReader32KPool.Get().(*bufio.Reader) - buf.Reset(r) - return &bufferedReader{buf} -} - -func (r *bufferedReader) Read(p []byte) (n int, err error) { - if r.buf == nil { - return 0, io.EOF - } - n, err = r.buf.Read(p) - if err == io.EOF { - r.buf.Reset(nil) - bufioReader32KPool.Put(r.buf) - r.buf = nil - } - return -} - -func (r *bufferedReader) Peek(n int) ([]byte, error) { - if r.buf == nil { - return nil, io.EOF - } - return r.buf.Peek(n) -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Gzip: {0x1F, 0x8B, 0x08}, - Zstd: {0x28, 0xb5, 0x2f, 0xfd}, - } { - if len(source) < len(m) { - // Len too short - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression - } - } - return Uncompressed -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { - buf := newBufferedReader(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue docker/docker#18170 - return nil, err - } - - switch compression := DetectCompression(bs); compression { - case Uncompressed: - return &readCloserWrapper{ - Reader: buf, - compression: compression, - }, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - gzReader, err := gzipDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - - return &readCloserWrapper{ - Reader: gzReader, - compression: compression, - closer: func() error { - cancel() - return gzReader.Close() - }, - }, nil - case Zstd: - zstdReader, err := zstd.NewReader(buf) - if err != nil { - return nil, err - } - return &readCloserWrapper{ - Reader: zstdReader, - compression: compression, - closer: func() error { - zstdReader.Close() - return nil - }, - }, nil - - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. 
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - switch compression { - case Uncompressed: - return &writeCloserWrapper{dest, nil}, nil - case Gzip: - return gzip.NewWriter(dest), nil - case Zstd: - return zstd.NewWriter(dest) - default: - return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Gzip: - return "gz" - case Zstd: - return "zst" - } - return "" -} - -func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - initPigz.Do(func() { - if unpigzPath = detectPigz(); unpigzPath != "" { - log.L.Debug("using pigz for decompression") - } - }) - - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { - reader, writer := io.Pipe() - - cmd.Stdin = in - cmd.Stdout = writer - - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - if err := cmd.Start(); err != nil { - return nil, err - } - - go func() { - if err := cmd.Wait(); err != nil { - writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - writer.Close() - } - }() - - return reader, nil -} - -func detectPigz() string { - path, err := exec.LookPath("unpigz") - if err != nil { - log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") - return "" - } - - // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable - value := os.Getenv(disablePigzEnv) - if value == "" { - return path - } - - disable, err := strconv.ParseBool(value) - if err != nil { - log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) - return path - } - - if disable { - return "" - } - - return path -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/adaptor.go b/ui/wasm/vendor/github.com/containerd/containerd/content/adaptor.go deleted file mode 100644 index 88bad2610e8..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/adaptor.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "strings" - - "github.com/containerd/containerd/filters" -) - -// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. 
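The compression helpers deleted above pick the codec by sniffing magic bytes (gzip `1f 8b 08`, zstd `28 b5 2f fd`) rather than trusting file names. A minimal sketch of how a caller would have used the vendored package (illustrative; the `layer.tar.gz` path is an assumed placeholder):

```go
package main

import (
    "fmt"
    "io"
    "os"

    "github.com/containerd/containerd/archive/compression"
)

func main() {
    f, err := os.Open("layer.tar.gz") // placeholder input file
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // DecompressStream peeks at the first bytes, detects gzip/zstd, and
    // returns a reader over the decompressed payload.
    rc, err := compression.DecompressStream(f)
    if err != nil {
        panic(err)
    }
    defer rc.Close()

    // GetCompression reports the detected codec (0=Uncompressed, 1=Gzip, 2=Zstd).
    fmt.Println("detected:", rc.GetCompression())
    if _, err := io.Copy(io.Discard, rc); err != nil {
        panic(err)
    }
}
```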
-func AdaptInfo(info Info) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "digest": - return info.Digest.String(), true - case "size": - // TODO: support size based filtering - case "labels": - return checkMap(fieldpath[1:], info.Labels) - } - - return "", false - }) -} - -func checkMap(fieldpath []string, m map[string]string) (string, bool) { - if len(m) == 0 { - return "", false - } - - value, ok := m[strings.Join(fieldpath, ".")] - return value, ok -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/content.go b/ui/wasm/vendor/github.com/containerd/containerd/content/content.go deleted file mode 100644 index ff17a8417b3..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/content.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "io" - "time" - - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer -type ReaderAt interface { - io.ReaderAt - io.Closer - Size() int64 -} - -// Provider provides a reader interface for specific content -type Provider interface { - // ReaderAt only requires desc.Digest to be set. - // Other fields in the descriptor may be used internally for resolving - // the location of the actual data. - ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) -} - -// Ingester writes content -type Ingester interface { - // Some implementations require WithRef to be included in opts. - Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) -} - -// Info holds content specific information -// -// TODO(stevvooe): Consider a very different name for this struct. Info is way -// to general. It also reads very weird in certain context, like pluralization. -type Info struct { - Digest digest.Digest - Size int64 - CreatedAt time.Time - UpdatedAt time.Time - Labels map[string]string -} - -// Status of a content operation -type Status struct { - Ref string - Offset int64 - Total int64 - Expected digest.Digest - StartedAt time.Time - UpdatedAt time.Time -} - -// WalkFunc defines the callback for a blob walk. -type WalkFunc func(Info) error - -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { - // Info will return metadata about content available in the content store. - // - // If the content is not present, ErrNotFound will be returned. - Info(ctx context.Context, dgst digest.Digest) (Info, error) - - // Update updates mutable information related to content. - // If one or more fieldpaths are provided, only those - // fields will be updated. 
- // Mutable fields: - // labels.* - Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) - - // Walk will call fn for each item in the content store which - // match the provided filters. If no filters are given all - // items will be walked. - Walk(ctx context.Context, fn WalkFunc, filters ...string) error - - // Delete removes the content from the store. - Delete(ctx context.Context, dgst digest.Digest) error -} - -// IngestManager provides methods for managing ingests. -type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match the - // provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Writer handles the write of content into a content store -type Writer interface { - // Close closes the writer, if the writer has not been - // committed this allows resuming or aborting. - // Calling Close on a closed writer will not error. - io.WriteCloser - - // Digest may return empty digest or panics until committed. - Digest() digest.Digest - - // Commit commits the blob (but no roll-back is guaranteed on an error). - // size and expected can be zero-value when unknown. - // Commit always closes the writer, even on error. - // ErrAlreadyExists aborts the writer. - Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error - - // Status returns the current state of write - Status() (Status, error) - - // Truncate updates the size of the target blob - Truncate(size int64) error -} - -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - -// Opt is used to alter the mutable properties of content -type Opt func(*Info) error - -// WithLabels allows labels to be set on content -func WithLabels(labels map[string]string) Opt { - return func(info *Info) error { - info.Labels = labels - return nil - } -} - -// WriterOpts is internally used by WriterOpt. -type WriterOpts struct { - Ref string - Desc ocispec.Descriptor -} - -// WriterOpt is used for passing options to Ingester.Writer. -type WriterOpt func(*WriterOpts) error - -// WithDescriptor specifies an OCI descriptor. -// Writer may optionally use the descriptor internally for resolving -// the location of the actual data. -// Write does not require any field of desc to be set. -// If the data size is unknown, desc.Size should be set to 0. -// Some implementations may also accept negative values as "unknown". -func WithDescriptor(desc ocispec.Descriptor) WriterOpt { - return func(opts *WriterOpts) error { - opts.Desc = desc - return nil - } -} - -// WithRef specifies a ref string. -func WithRef(ref string) WriterOpt { - return func(opts *WriterOpts) error { - opts.Ref = ref - return nil - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/helpers.go b/ui/wasm/vendor/github.com/containerd/containerd/content/helpers.go deleted file mode 100644 index 00fae1fc80d..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/helpers.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "io" - "io/ioutil" - "math/rand" - "sync" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// NewReader returns a io.Reader from a ReaderAt -func NewReader(ra ReaderAt) io.Reader { - rd := io.NewSectionReader(ra, 0, ra.Size()) - return rd -} - -// ReadBlob retrieves the entire contents of the blob from the provider. -// -// Avoid using this for large blobs, such as layers. -func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - - p := make([]byte, ra.Size()) - - n, err := ra.ReadAt(p, 0) - if err == io.EOF { - if int64(n) != ra.Size() { - err = io.ErrUnexpectedEOF - } else { - err = nil - } - } - return p, err -} - -// WriteBlob writes data with the expected digest into the content store. If -// expected already exists, the method returns immediately and the reader will -// not be consumed. -// -// This is useful when the digest and size are known beforehand. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { - cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to open writer") - } - - return nil // all ready present - } - defer cw.Close() - - return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) -} - -// OpenWriter opens a new writer for the given reference, retrying if the writer -// is locked until the reference is available or returns an error. -func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { - var ( - cw Writer - err error - retry = 16 - ) - for { - cw, err = cs.Writer(ctx, opts...) - if err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - // TODO: Check status to determine if the writer is active, - // continue waiting while active, otherwise return lock - // error or abort. Requires asserting for an ingest manager - - select { - case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): - if retry < 2048 { - retry = retry << 1 - } - continue - case <-ctx.Done(): - // Propagate lock error - return nil, err - } - - } - break - } - - return cw, err -} - -// Copy copies data with the expected digest from the reader into the -// provided content store writer. This copy commits the writer. -// -// This is useful when the digest and size are known beforehand. When -// the size or digest is unknown, these values may be empty. -// -// Copy is buffered, so no need to wrap reader in buffered io. 
-func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { - ws, err := cw.Status() - if err != nil { - return errors.Wrap(err, "failed to get status") - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, size) - if err != nil { - return errors.Wrapf(err, "unable to resume write to %v", ws.Ref) - } - } - - copied, err := copyWithBuffer(cw, r) - if err != nil { - return errors.Wrap(err, "failed to copy") - } - if size != 0 && copied < size-ws.Offset { - // Short writes would return its own error, this indicates a read failure - return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes") - } - - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) - } - } - - return nil -} - -// CopyReaderAt copies to a writer from a given reader at for the given -// number of bytes. This copy does not commit the writer. -func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { - ws, err := cw.Status() - if err != nil { - return err - } - - copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) - if err != nil { - return errors.Wrap(err, "failed to copy") - } - if copied < n { - // Short writes would return its own error, this indicates a read failure - return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes") - } - return nil -} - -// CopyReader copies to a writer from a given reader, returning -// the number of bytes copied. -// Note: if the writer has a non-zero offset, the total number -// of bytes read may be greater than those copied if the reader -// is not an io.Seeker. -// This copy does not commit the writer. -func CopyReader(cw Writer, r io.Reader) (int64, error) { - ws, err := cw.Status() - if err != nil { - return 0, errors.Wrap(err, "failed to get status") - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, 0) - if err != nil { - return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) - } - } - - return copyWithBuffer(cw, r) -} - -// seekReader attempts to seek the reader to the given offset, either by -// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding -// up to the given offset. -func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { - // attempt to resolve r as a seeker and setup the offset. - seeker, ok := r.(io.Seeker) - if ok { - nn, err := seeker.Seek(offset, io.SeekStart) - if nn != offset { - return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) - } - - if err != nil { - return nil, err - } - - return r, nil - } - - // ok, let's try io.ReaderAt! - readerAt, ok := r.(io.ReaderAt) - if ok && size > offset { - sr := io.NewSectionReader(readerAt, offset, size) - return sr, nil - } - - // well then, let's just discard up to the offset - n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset)) - if err != nil { - return nil, errors.Wrap(err, "failed to discard to offset") - } - if n != offset { - return nil, errors.Errorf("unable to discard to offset") - } - - return r, nil -} - -// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer -// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have -// a full buffer before we do a write operation to dst to reduce overheads associated -// with the write operations of small buffers. 
-func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { - // If the reader has a WriteTo method, use it to do the copy. - // Avoids an allocation and a copy. - if wt, ok := src.(io.WriterTo); ok { - return wt.WriteTo(dst) - } - // Similarly, if the writer has a ReadFrom method, use it to do the copy. - if rt, ok := dst.(io.ReaderFrom); ok { - return rt.ReadFrom(src) - } - bufRef := bufPool.Get().(*[]byte) - defer bufPool.Put(bufRef) - buf := *bufRef - for { - nr, er := io.ReadAtLeast(src, buf, len(buf)) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - // If an EOF happens after reading fewer than the requested bytes, - // ReadAtLeast returns ErrUnexpectedEOF. - if er != io.EOF && er != io.ErrUnexpectedEOF { - err = er - } - break - } - } - return -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/locks.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/locks.go deleted file mode 100644 index d1d2d564df2..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/locks.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "sync" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" -) - -// Handles locking references - -type lock struct { - since time.Time -} - -var ( - // locks lets us lock in process - locks = make(map[string]*lock) - locksMu sync.Mutex -) - -func tryLock(ref string) error { - locksMu.Lock() - defer locksMu.Unlock() - - if v, ok := locks[ref]; ok { - return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since) - } - - locks[ref] = &lock{time.Now()} - return nil -} - -func unlock(ref string) { - locksMu.Lock() - defer locksMu.Unlock() - - delete(locks, ref) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/readerat.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/readerat.go deleted file mode 100644 index 5d3ae039038..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/readerat.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package local - -import ( - "os" - - "github.com/pkg/errors" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// readerat implements io.ReaderAt in a completely stateless manner by opening -// the referenced file for each call to ReadAt. -type sizeReaderAt struct { - size int64 - fp *os.File -} - -// OpenReader creates ReaderAt from a file -func OpenReader(p string) (content.ReaderAt, error) { - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") - } - - fp, err := os.Open(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") - } - - return sizeReaderAt{size: fi.Size(), fp: fp}, nil -} - -func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { - return ra.fp.ReadAt(p, offset) -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -func (ra sizeReaderAt) Close() error { - return ra.fp.Close() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/store.go deleted file mode 100644 index 314d913673b..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store.go +++ /dev/null @@ -1,701 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/filters" - "github.com/containerd/containerd/log" - "github.com/sirupsen/logrus" - - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// LabelStore is used to store mutable labels for digests -type LabelStore interface { - // Get returns all the labels for the given digest - Get(digest.Digest) (map[string]string, error) - - // Set sets all the labels for a given digest - Set(digest.Digest, map[string]string) error - - // Update replaces the given labels for a digest, - // a key with an empty value removes a label. - Update(digest.Digest, map[string]string) (map[string]string, error) -} - -// Store is digest-keyed store for content. All data written into the store is -// stored under a verifiable digest. -// -// Store can generally support multi-reader, single-writer ingest of data, -// including resumable ingest. 
-type store struct { - root string - ls LabelStore -} - -// NewStore returns a local content store -func NewStore(root string) (content.Store, error) { - return NewLabeledStore(root, nil) -} - -// NewLabeledStore returns a new content store using the provided label store -// -// Note: content stores which are used underneath a metadata store may not -// require labels and should use `NewStore`. `NewLabeledStore` is primarily -// useful for tests or standalone implementations. -func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { - if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { - return nil, err - } - - return &store{ - root: root, - ls: ls, - }, nil -} - -func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - p, err := s.blobPath(dgst) - if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob info path") - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) - } - - return content.Info{}, err - } - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return content.Info{}, err - } - } - return s.info(dgst, fi, labels), nil -} - -func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { - return content.Info{ - Digest: dgst, - Size: fi.Size(), - CreatedAt: fi.ModTime(), - UpdatedAt: getATime(fi), - Labels: labels, - } -} - -// ReaderAt returns an io.ReaderAt for the blob. -func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - p, err := s.blobPath(desc.Digest) - if err != nil { - return nil, errors.Wrapf(err, "calculating blob path for ReaderAt") - } - - reader, err := OpenReader(p) - if err != nil { - return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p) - } - - return reader, nil -} - -// Delete removes a blob by its digest. -// -// While this is safe to do concurrently, safe exist-removal logic must hold -// some global lock on the store. 
-func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { - bp, err := s.blobPath(dgst) - if err != nil { - return errors.Wrapf(err, "calculating blob path for delete") - } - - if err := os.RemoveAll(bp); err != nil { - if !os.IsNotExist(err) { - return err - } - - return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) - } - - return nil -} - -func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - if s.ls == nil { - return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") - } - - p, err := s.blobPath(info.Digest) - if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob path for update") - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) - } - - return content.Info{}, err - } - - var ( - all bool - labels map[string]string - ) - if len(fieldpaths) > 0 { - for _, path := range fieldpaths { - if strings.HasPrefix(path, "labels.") { - if labels == nil { - labels = map[string]string{} - } - - key := strings.TrimPrefix(path, "labels.") - labels[key] = info.Labels[key] - continue - } - - switch path { - case "labels": - all = true - labels = info.Labels - default: - return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) - } - } - } else { - all = true - labels = info.Labels - } - - if all { - err = s.ls.Set(info.Digest, labels) - } else { - labels, err = s.ls.Update(info.Digest, labels) - } - if err != nil { - return content.Info{}, err - } - - info = s.info(info.Digest, fi, labels) - info.UpdatedAt = time.Now() - - if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { - log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) - } - - return info, nil -} - -func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - root := filepath.Join(s.root, "blobs") - - filter, err := filters.ParseAll(fs...) - if err != nil { - return err - } - - var alg digest.Algorithm - return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if !fi.IsDir() && !alg.Available() { - return nil - } - - // TODO(stevvooe): There are few more cases with subdirs that should be - // handled in case the layout gets corrupted. This isn't strict enough - // and may spew bad data. - - if path == root { - return nil - } - if filepath.Dir(path) == root { - alg = digest.Algorithm(filepath.Base(path)) - - if !alg.Available() { - alg = "" - return filepath.SkipDir - } - - // descending into a hash directory - return nil - } - - dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) - if err := dgst.Validate(); err != nil { - // log error but don't report - log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") - // if we see this, it could mean some sort of corruption of the - // store or extra paths not expected previously. 
- } - - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return err - } - } - - info := s.info(dgst, fi, labels) - if !filter.Match(content.AdaptInfo(info)) { - return nil - } - return fn(info) - }) -} - -func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { - return s.status(s.ingestRoot(ref)) -} - -func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return nil, err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return nil, err - } - - filter, err := filters.ParseAll(fs...) - if err != nil { - return nil, err - } - - var active []content.Status - for _, fi := range fis { - p := filepath.Join(s.root, "ingest", fi.Name()) - stat, err := s.status(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - // TODO(stevvooe): This is a common error if uploads are being - // completed while making this listing. Need to consider taking a - // lock on the whole store to coordinate this aspect. - // - // Another option is to cleanup downloads asynchronously and - // coordinate this method with the cleanup process. - // - // For now, we just skip them, as they really don't exist. - continue - } - - if filter.Match(adaptStatus(stat)) { - active = append(active, stat) - } - } - - return active, nil -} - -// WalkStatusRefs is used to walk all status references -// Failed status reads will be logged and ignored, if -// this function is called while references are being altered, -// these error messages may be produced. -func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return err - } - - for _, fi := range fis { - rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") - - ref, err := readFileString(rf) - if err != nil { - log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") - continue - } - - if err := fn(ref); err != nil { - return err - } - } - - return nil -} - -// status works like stat above except uses the path to the ingest. -func (s *store) status(ingestPath string) (content.Status, error) { - dp := filepath.Join(ingestPath, "data") - fi, err := os.Stat(dp) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return content.Status{}, err - } - - ref, err := readFileString(filepath.Join(ingestPath, "ref")) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return content.Status{}, err - } - - startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) - if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read startedat") - } - - updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) - if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read updatedat") - } - - // because we don't write updatedat on every write, the mod time may - // actually be more up to date. 
- if fi.ModTime().After(updatedAt) { - updatedAt = fi.ModTime() - } - - return content.Status{ - Ref: ref, - Offset: fi.Size(), - Total: s.total(ingestPath), - UpdatedAt: updatedAt, - StartedAt: startedAt, - }, nil -} - -func adaptStatus(status content.Status) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - switch fieldpath[0] { - case "ref": - return status.Ref, true - } - - return "", false - }) -} - -// total attempts to resolve the total expected size for the write. -func (s *store) total(ingestPath string) int64 { - totalS, err := readFileString(filepath.Join(ingestPath, "total")) - if err != nil { - return 0 - } - - total, err := strconv.ParseInt(totalS, 10, 64) - if err != nil { - // represents a corrupted file, should probably remove. - return 0 - } - - return total -} - -// Writer begins or resumes the active writer identified by ref. If the writer -// is already in use, an error is returned. Only one writer may be in use per -// ref at a time. -// -// The argument `ref` is used to uniquely identify a long-lived writer transaction. -func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - // TODO(AkihiroSuda): we could create a random string or one calculated based on the context - // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 - if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") - } - var lockErr error - for count := uint64(0); count < 10; count++ { - if err := tryLock(wOpts.Ref); err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - lockErr = err - } else { - lockErr = nil - break - } - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count))) - } - - if lockErr != nil { - return nil, lockErr - } - - w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) - if err != nil { - unlock(wOpts.Ref) - return nil, err - } - - return w, nil // lock is now held by w. -} - -func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) { - path, _, data := s.ingestPaths(ref) - status, err := s.status(path) - if err != nil { - return status, errors.Wrap(err, "failed reading status of resume write") - } - if ref != status.Ref { - // NOTE(stevvooe): This is fairly catastrophic. Either we have some - // layout corruption or a hash collision for the ref key. - return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref) - } - - if total > 0 && status.Total > 0 && total != status.Total { - return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total) - } - - // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes - fp, err := os.Open(data) - if err != nil { - return status, err - } - - p := bufPool.Get().(*[]byte) - status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) - bufPool.Put(p) - fp.Close() - return status, err -} - -// writer provides the main implementation of the Writer method. The caller -// must hold the lock correctly and release on error if there is a problem. -func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { - // TODO(stevvooe): Need to actually store expected here. We have - // code in the service that shouldn't be dealing with this. - if expected != "" { - p, err := s.blobPath(expected) - if err != nil { - return nil, errors.Wrap(err, "calculating expected blob path for writer") - } - if _, err := os.Stat(p); err == nil { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) - } - } - - path, refp, data := s.ingestPaths(ref) - - var ( - digester = digest.Canonical.Digester() - offset int64 - startedAt time.Time - updatedAt time.Time - ) - - foundValidIngest := false - // ensure that the ingest path has been created. 
- if err := os.Mkdir(path, 0755); err != nil { - if !os.IsExist(err) { - return nil, err - } - status, err := s.resumeStatus(ref, total, digester) - if err == nil { - foundValidIngest = true - updatedAt = status.UpdatedAt - startedAt = status.StartedAt - total = status.Total - offset = status.Offset - } else { - logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) - } - } - - if !foundValidIngest { - startedAt = time.Now() - updatedAt = startedAt - - // the ingest is new, we need to setup the target location. - // write the ref to a file for later use - if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { - return nil, err - } - - if total > 0 { - if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { - return nil, err - } - } - } - - fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return nil, errors.Wrap(err, "failed to open data file") - } - - if _, err := fp.Seek(offset, io.SeekStart); err != nil { - return nil, errors.Wrap(err, "could not seek to current write offset") - } - - return &writer{ - s: s, - fp: fp, - ref: ref, - path: path, - offset: offset, - total: total, - digester: digester, - startedAt: startedAt, - updatedAt: updatedAt, - }, nil -} - -// Abort an active transaction keyed by ref. If the ingest is active, it will -// be cancelled. Any resources associated with the ingest will be cleaned. -func (s *store) Abort(ctx context.Context, ref string) error { - root := s.ingestRoot(ref) - if err := os.RemoveAll(root); err != nil { - if os.IsNotExist(err) { - return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref) - } - - return err - } - - return nil -} - -func (s *store) blobPath(dgst digest.Digest) (string, error) { - if err := dgst.Validate(); err != nil { - return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err) - } - - return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil -} - -func (s *store) ingestRoot(ref string) string { - // we take a digest of the ref to keep the ingest paths constant length. - // Note that this is not the current or potential digest of incoming content. - dgst := digest.FromString(ref) - return filepath.Join(s.root, "ingest", dgst.Hex()) -} - -// ingestPaths are returned. The paths are the following: -// -// - root: entire ingest directory -// - ref: name of the starting ref, must be unique -// - data: file where data is written -// -func (s *store) ingestPaths(ref string) (string, string, string) { - var ( - fp = s.ingestRoot(ref) - rp = filepath.Join(fp, "ref") - dp = filepath.Join(fp, "data") - ) - - return fp, rp, dp -} - -func readFileString(path string) (string, error) { - p, err := ioutil.ReadFile(path) - return string(p), err -} - -// readFileTimestamp reads a file with just a timestamp present. 
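Editor's note: the Writer, resumeStatus, and ingestPaths code above implements resumable, ref-keyed uploads. A hedged sketch of the intended flow, assuming a content.Store obtained from local.NewStore; the ref and payload are hypothetical.

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
)

// upload sketches the ref-keyed ingest flow implemented above.
func upload(ctx context.Context, cs content.Store, payload []byte) error {
	// The ref is hashed into a constant-length ingest directory:
	// <root>/ingest/<sha256(ref)>/{ref,data,startedat,updatedat,total}.
	w, err := cs.Writer(ctx, content.WithRef("upload-1"))
	if err != nil {
		return err // errdefs.ErrUnavailable while another writer holds the ref lock
	}
	defer w.Close()

	if _, err := w.Write(payload); err != nil {
		return err
	}

	// Commit renames the ingest data file into <root>/blobs/<alg>/<hex>;
	// size 0 and digest "" skip the optional validations.
	if err := w.Commit(ctx, 0, ""); err != nil && !errdefs.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```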
-func readFileTimestamp(p string) (time.Time, error) { - b, err := ioutil.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return time.Time{}, err - } - - var t time.Time - if err := t.UnmarshalText(b); err != nil { - return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) - } - - return t, nil -} - -func writeTimestampFile(p string, t time.Time) error { - b, err := t.MarshalText() - if err != nil { - return err - } - return atomicWrite(p, b, 0666) -} - -func atomicWrite(path string, data []byte, mode os.FileMode) error { - tmp := fmt.Sprintf("%s.tmp", path) - f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) - if err != nil { - return errors.Wrap(err, "create tmp file") - } - _, err = f.Write(data) - f.Close() - if err != nil { - return errors.Wrap(err, "write atomic data") - } - return os.Rename(tmp, path) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_bsd.go deleted file mode 100644 index da149a2fdae..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_bsd.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. - } - - return fi.ModTime() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_openbsd.go deleted file mode 100644 index f34f0dad2b9..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_openbsd.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. 
- } - - return fi.ModTime() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_unix.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_unix.go deleted file mode 100644 index 69a74bab0ea..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. - } - - return fi.ModTime() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_windows.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_windows.go deleted file mode 100644 index bce84997909..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/store_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - return fi.ModTime() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/content/local/writer.go b/ui/wasm/vendor/github.com/containerd/containerd/content/local/writer.go deleted file mode 100644 index 0a11f4d912e..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/content/local/writer.go +++ /dev/null @@ -1,207 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package local - -import ( - "context" - "io" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// writer represents a write transaction against the blob store. -type writer struct { - s *store - fp *os.File // opened data file - path string // path to writer dir - ref string // ref key - offset int64 - total int64 - digester digest.Digester - startedAt time.Time - updatedAt time.Time -} - -func (w *writer) Status() (content.Status, error) { - return content.Status{ - Ref: w.ref, - Offset: w.offset, - Total: w.total, - StartedAt: w.startedAt, - UpdatedAt: w.updatedAt, - }, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *writer) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. -// -// Note that writes are unbuffered to the backing file. When writing, it is -// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. -func (w *writer) Write(p []byte) (n int, err error) { - n, err = w.fp.Write(p) - w.digester.Hash().Write(p[:n]) - w.offset += int64(len(p)) - w.updatedAt = time.Now() - return n, err -} - -func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - // Ensure even on error the writer is fully closed - defer unlock(w.ref) - - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - fp := w.fp - w.fp = nil - - if fp == nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") - } - - if err := fp.Sync(); err != nil { - fp.Close() - return errors.Wrap(err, "sync failed") - } - - fi, err := fp.Stat() - closeErr := fp.Close() - if err != nil { - return errors.Wrap(err, "stat on ingest file failed") - } - if closeErr != nil { - return errors.Wrap(err, "failed to close ingest file") - } - - if size > 0 && size != fi.Size() { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) - } - - dgst := w.digester.Digest() - if expected != "" && expected != dgst { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) - } - - var ( - ingest = filepath.Join(w.path, "data") - target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst - ) - - // make sure parent directories of blob exist - if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { - return err - } - - if _, err := os.Stat(target); err == nil { - // collision with the target file! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") - } - return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) - } - - if err := os.Rename(ingest, target); err != nil { - return err - } - - // Ingest has now been made available in the content store, attempt to complete - // setting metadata but errors should only be logged and not returned since - // the content store cannot be cleanly rolled back. - - commitTime := time.Now() - if err := os.Chtimes(target, commitTime, commitTime); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") - } - - // clean up!! 
- if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") - } - - if w.s.ls != nil && base.Labels != nil { - if err := w.s.ls.Set(dgst, base.Labels); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") - } - } - - // change to readonly, more important for read, but provides _some_ - // protection from this point on. We use the existing perms with a mask - // only allowing reads honoring the umask on creation. - // - // This removes write and exec, only allowing read per the creation umask. - // - // NOTE: Windows does not support this operation - if runtime.GOOS != "windows" { - if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { - log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") - } - } - - return nil -} - -// Close the writer, flushing any unwritten data and leaving the progress in -// tact. -// -// If one needs to resume the transaction, a new writer can be obtained from -// `Ingester.Writer` using the same key. The write can then be continued -// from it was left off. -// -// To abandon a transaction completely, first call close then `IngestManager.Abort` to -// clean up the associated resources. -func (w *writer) Close() (err error) { - if w.fp != nil { - w.fp.Sync() - err = w.fp.Close() - writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) - w.fp = nil - unlock(w.ref) - return - } - - return nil -} - -func (w *writer) Truncate(size int64) error { - if size != 0 { - return errors.New("Truncate: unsupported size") - } - w.offset = 0 - w.digester.Hash().Reset() - if _, err := w.fp.Seek(0, io.SeekStart); err != nil { - return err - } - return w.fp.Truncate(0) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/errdefs/errors.go b/ui/wasm/vendor/github.com/containerd/containerd/errdefs/errors.go deleted file mode 100644 index 05a35228ca4..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/errdefs/errors.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package errdefs defines the common errors used throughout containerd -// packages. -// -// Use with errors.Wrap and error.Wrapf to add context to an error. -// -// To detect an error class, use the IsXXX functions to tell whether an error -// is of a certain type. -// -// The functions ToGRPC and FromGRPC can be used to map server-side and -// client-side errors to the correct types. -package errdefs - -import ( - "context" - - "github.com/pkg/errors" -) - -// Definitions of common error types used throughout containerd. All containerd -// errors returned by most packages will map into one of these errors classes. -// Packages should return errors of these types when they want to instruct a -// client to take a particular action. -// -// For the most part, we just try to provide local grpc errors. Most conditions -// map very well to those defined by grpc. 
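Editor's note: the errdefs package whose deletion starts here defines the error classes the store code above wraps (ErrNotFound, ErrAlreadyExists, and so on). A hedged sketch of the produce/consume pattern; the digest string is hypothetical.

```go
package example

import (
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// classify sketches the intended produce/consume pattern.
func classify() bool {
	// Producers wrap a class with call-site context...
	err := errors.Wrapf(errdefs.ErrNotFound, "content %v", "sha256:hypothetical")
	// ...and consumers test the class rather than the message.
	return errdefs.IsNotFound(err) // true
}
```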
-var ( - ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. - ErrInvalidArgument = errors.New("invalid argument") - ErrNotFound = errors.New("not found") - ErrAlreadyExists = errors.New("already exists") - ErrFailedPrecondition = errors.New("failed precondition") - ErrUnavailable = errors.New("unavailable") - ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented -) - -// IsInvalidArgument returns true if the error is due to an invalid argument -func IsInvalidArgument(err error) bool { - return errors.Is(err, ErrInvalidArgument) -} - -// IsNotFound returns true if the error is due to a missing object -func IsNotFound(err error) bool { - return errors.Is(err, ErrNotFound) -} - -// IsAlreadyExists returns true if the error is due to an already existing -// metadata item -func IsAlreadyExists(err error) bool { - return errors.Is(err, ErrAlreadyExists) -} - -// IsFailedPrecondition returns true if an operation could not proceed to the -// lack of a particular condition -func IsFailedPrecondition(err error) bool { - return errors.Is(err, ErrFailedPrecondition) -} - -// IsUnavailable returns true if the error is due to a resource being unavailable -func IsUnavailable(err error) bool { - return errors.Is(err, ErrUnavailable) -} - -// IsNotImplemented returns true if the error is due to not being implemented -func IsNotImplemented(err error) bool { - return errors.Is(err, ErrNotImplemented) -} - -// IsCanceled returns true if the error is due to `context.Canceled`. -func IsCanceled(err error) bool { - return errors.Is(err, context.Canceled) -} - -// IsDeadlineExceeded returns true if the error is due to -// `context.DeadlineExceeded`. -func IsDeadlineExceeded(err error) bool { - return errors.Is(err, context.DeadlineExceeded) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/errdefs/grpc.go b/ui/wasm/vendor/github.com/containerd/containerd/errdefs/grpc.go deleted file mode 100644 index 209f63bd0fc..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/errdefs/grpc.go +++ /dev/null @@ -1,147 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errdefs - -import ( - "context" - "strings" - - "github.com/pkg/errors" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// ToGRPC will attempt to map the backend containerd error into a grpc error, -// using the original error message as a description. -// -// Further information may be extracted from certain errors depending on their -// type. -// -// If the error is unmapped, the original error will be returned to be handled -// by the regular grpc error handling stack. 
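Editor's note: a hedged sketch of the round trip through ToGRPC and FromGRPC defined below; server code maps an error class onto a gRPC status code, and the client recovers the same class.

```go
package example

import (
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// roundTrip sketches server-side mapping to gRPC and client-side recovery.
func roundTrip() (codes.Code, bool) {
	wireErr := errdefs.ToGRPC(errors.Wrap(errdefs.ErrNotFound, "content sha256:hypothetical"))
	// status.Code(wireErr) is codes.NotFound; FromGRPC maps it back to
	// the same class, so IsNotFound still reports true on the client.
	return status.Code(wireErr), errdefs.IsNotFound(errdefs.FromGRPC(wireErr))
}
```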
-func ToGRPC(err error) error { - if err == nil { - return nil - } - - if isGRPCError(err) { - // error has already been mapped to grpc - return err - } - - switch { - case IsInvalidArgument(err): - return status.Errorf(codes.InvalidArgument, err.Error()) - case IsNotFound(err): - return status.Errorf(codes.NotFound, err.Error()) - case IsAlreadyExists(err): - return status.Errorf(codes.AlreadyExists, err.Error()) - case IsFailedPrecondition(err): - return status.Errorf(codes.FailedPrecondition, err.Error()) - case IsUnavailable(err): - return status.Errorf(codes.Unavailable, err.Error()) - case IsNotImplemented(err): - return status.Errorf(codes.Unimplemented, err.Error()) - case IsCanceled(err): - return status.Errorf(codes.Canceled, err.Error()) - case IsDeadlineExceeded(err): - return status.Errorf(codes.DeadlineExceeded, err.Error()) - } - - return err -} - -// ToGRPCf maps the error to grpc error codes, assembling the formatting string -// and combining it with the target error string. -// -// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) -func ToGRPCf(err error, format string, args ...interface{}) error { - return ToGRPC(errors.Wrapf(err, format, args...)) -} - -// FromGRPC returns the underlying error from a grpc service based on the grpc error code -func FromGRPC(err error) error { - if err == nil { - return nil - } - - var cls error // divide these into error classes, becomes the cause - - switch code(err) { - case codes.InvalidArgument: - cls = ErrInvalidArgument - case codes.AlreadyExists: - cls = ErrAlreadyExists - case codes.NotFound: - cls = ErrNotFound - case codes.Unavailable: - cls = ErrUnavailable - case codes.FailedPrecondition: - cls = ErrFailedPrecondition - case codes.Unimplemented: - cls = ErrNotImplemented - case codes.Canceled: - cls = context.Canceled - case codes.DeadlineExceeded: - cls = context.DeadlineExceeded - default: - cls = ErrUnknown - } - - msg := rebaseMessage(cls, err) - if msg != "" { - err = errors.Wrap(cls, msg) - } else { - err = errors.WithStack(cls) - } - - return err -} - -// rebaseMessage removes the repeats for an error at the end of an error -// string. This will happen when taking an error over grpc then remapping it. -// -// Effectively, we just remove the string of cls from the end of err if it -// appears there. -func rebaseMessage(cls error, err error) string { - desc := errDesc(err) - clss := cls.Error() - if desc == clss { - return "" - } - - return strings.TrimSuffix(desc, ": "+clss) -} - -func isGRPCError(err error) bool { - _, ok := status.FromError(err) - return ok -} - -func code(err error) codes.Code { - if s, ok := status.FromError(err); ok { - return s.Code() - } - return codes.Unknown -} - -func errDesc(err error) string { - if s, ok := status.FromError(err); ok { - return s.Message() - } - return err.Error() -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/filters/adaptor.go b/ui/wasm/vendor/github.com/containerd/containerd/filters/adaptor.go deleted file mode 100644 index 5a9c559c1e0..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/filters/adaptor.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -// Adaptor specifies the mapping of fieldpaths to a type. For the given field -// path, the value and whether it is present should be returned. The mapping of -// the fieldpath to a field is deferred to the adaptor implementation, but -// should generally follow protobuf field path/mask semantics. -type Adaptor interface { - Field(fieldpath []string) (value string, present bool) -} - -// AdapterFunc allows implementation specific matching of fieldpaths -type AdapterFunc func(fieldpath []string) (string, bool) - -// Field returns the field name and true if it exists -func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { - return fn(fieldpath) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/filters/filter.go b/ui/wasm/vendor/github.com/containerd/containerd/filters/filter.go deleted file mode 100644 index cf09d8d9e4f..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/filters/filter.go +++ /dev/null @@ -1,179 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package filters defines a syntax and parser that can be used for the -// filtration of items across the containerd API. The core is built on the -// concept of protobuf field paths, with quoting. Several operators allow the -// user to flexibly select items based on field presence, equality, inequality -// and regular expressions. Flexible adaptors support working with any type. -// -// The syntax is fairly familiar, if you've used container ecosystem -// projects. At the core, we base it on the concept of protobuf field -// paths, augmenting with the ability to quote portions of the field path -// to match arbitrary labels. These "selectors" come in the following -// syntax: -// -// ``` -// <fieldpath>[<operator><value>] -// ``` -// -// A basic example is as follows: -// -// ``` -// name==foo -// ``` -// -// This would match all objects that have a field `name` with the value -// `foo`. If we only want to test if the field is present, we can omit the -// operator. This is most useful for matching labels in containerd. The -// following will match objects that have the field "labels" and have the -// label "foo" defined: -// -// ``` -// labels.foo -// ``` -// -// We also allow for quoting of parts of the field path to allow matching -// of arbitrary items: -// -// ``` -// labels."very complex label"==something -// ``` -// -// We also define `!=` and `~=` as operators. The `!=` will match all -// objects that don't match the value for a field and `~=` will compile the -// target value as a regular expression and match the field value against that. 
-// -// Selectors can be combined using a comma, such that the resulting -// selector will require all selectors are matched for the object to match. -// The following example will match objects that are named `foo` and have -// the label `bar`: -// -// ``` -// name==foo,labels.bar -// ``` -// -package filters - -import ( - "regexp" - - "github.com/containerd/containerd/log" -) - -// Filter matches specific resources based the provided filter -type Filter interface { - Match(adaptor Adaptor) bool -} - -// FilterFunc is a function that handles matching with an adaptor -type FilterFunc func(Adaptor) bool - -// Match matches the FilterFunc returning true if the object matches the filter -func (fn FilterFunc) Match(adaptor Adaptor) bool { - return fn(adaptor) -} - -// Always is a filter that always returns true for any type of object -var Always FilterFunc = func(adaptor Adaptor) bool { - return true -} - -// Any allows multiple filters to be matched against the object -type Any []Filter - -// Match returns true if any of the provided filters are true -func (m Any) Match(adaptor Adaptor) bool { - for _, m := range m { - if m.Match(adaptor) { - return true - } - } - - return false -} - -// All allows multiple filters to be matched against the object -type All []Filter - -// Match only returns true if all filters match the object -func (m All) Match(adaptor Adaptor) bool { - for _, m := range m { - if !m.Match(adaptor) { - return false - } - } - - return true -} - -type operator int - -const ( - operatorPresent = iota - operatorEqual - operatorNotEqual - operatorMatches -) - -func (op operator) String() string { - switch op { - case operatorPresent: - return "?" - case operatorEqual: - return "==" - case operatorNotEqual: - return "!=" - case operatorMatches: - return "~=" - } - - return "unknown" -} - -type selector struct { - fieldpath []string - operator operator - value string - re *regexp.Regexp -} - -func (m selector) Match(adaptor Adaptor) bool { - value, present := adaptor.Field(m.fieldpath) - - switch m.operator { - case operatorPresent: - return present - case operatorEqual: - return present && value == m.value - case operatorNotEqual: - return value != m.value - case operatorMatches: - if m.re == nil { - r, err := regexp.Compile(m.value) - if err != nil { - log.L.Errorf("error compiling regexp %q", m.value) - return false - } - - m.re = r - } - - return m.re.MatchString(value) - default: - return false - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/filters/parser.go b/ui/wasm/vendor/github.com/containerd/containerd/filters/parser.go deleted file mode 100644 index 0825d668caf..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/filters/parser.go +++ /dev/null @@ -1,292 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "fmt" - "io" - - "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" -) - -/* -Parse the strings into a filter that may be used with an adaptor. 
- -The filter is made up of zero or more selectors. - -The format is a comma separated list of expressions, in the form of -`<fieldpath><operator><value>`, known as selectors. All selectors must match the -target object for the filter to be true. - -We define the operators "==" for equality, "!=" for not equal and "~=" for a -regular expression. If the operator and value are not present, the matcher will -test for the presence of a value, as defined by the target object. - -The formal grammar is as follows: - -selectors := selector ("," selector)* -selector := fieldpath (operator value) -fieldpath := field ('.' field)* -field := quoted | [A-Za-z] [A-Za-z0-9_]+ -operator := "==" | "!=" | "~=" -value := quoted | [^\s,]+ -quoted := <go string syntax> - -*/ -func Parse(s string) (Filter, error) { - // special case empty to match all - if s == "" { - return Always, nil - } - - p := parser{input: s} - return p.parse() -} - -// ParseAll parses each filter in ss and returns a filter that will return true -// if any filter matches the expression. -// -// If no filters are provided, the filter will match anything. -func ParseAll(ss ...string) (Filter, error) { - if len(ss) == 0 { - return Always, nil - } - - var fs []Filter - for _, s := range ss { - f, err := Parse(s) - if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) - } - - fs = append(fs, f) - } - - return Any(fs), nil -} - -type parser struct { - input string - scanner scanner -} - -func (p *parser) parse() (Filter, error) { - p.scanner.init(p.input) - - ss, err := p.selectors() - if err != nil { - return nil, errors.Wrap(err, "filters") - } - - return ss, nil -} - -func (p *parser) selectors() (Filter, error) { - s, err := p.selector() - if err != nil { - return nil, err - } - - ss := All{s} - -loop: - for { - tok := p.scanner.peek() - switch tok { - case ',': - pos, tok, _ := p.scanner.scan() - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a separator") - } - - s, err := p.selector() - if err != nil { - return nil, err - } - - ss = append(ss, s) - case tokenEOF: - break loop - default: - return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) - } - } - - return ss, nil -} - -func (p *parser) selector() (selector, error) { - fieldpath, err := p.fieldpath() - if err != nil { - return selector{}, err - } - - switch p.scanner.peek() { - case ',', tokenSeparator, tokenEOF: - return selector{ - fieldpath: fieldpath, - operator: operatorPresent, - }, nil - } - - op, err := p.operator() - if err != nil { - return selector{}, err - } - - var allowAltQuotes bool - if op == operatorMatches { - allowAltQuotes = true - } - - value, err := p.value(allowAltQuotes) - if err != nil { - if err == io.EOF { - return selector{}, io.ErrUnexpectedEOF - } - return selector{}, err - } - - return selector{ - fieldpath: fieldpath, - value: value, - operator: op, - }, nil -} - -func (p *parser) fieldpath() ([]string, error) { - f, err := p.field() - if err != nil { - return nil, err - } - - fs := []string{f} -loop: - for { - tok := p.scanner.peek() // lookahead to consume field separator - - switch tok { - case '.': - pos, tok, _ := p.scanner.scan() // consume separator - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a field separator (`.`)") - } - - f, err := p.field() - if err != nil { - return nil, err - } - - fs = append(fs, f) - default: - // let the layer above handle the other bad cases. 
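Editor's note: pausing the parser listing for a moment, since the grammar above is easiest to see in use. A hedged sketch that parses a two-selector filter and applies it through the AdapterFunc shown earlier; the field values are hypothetical.

```go
package example

import "github.com/containerd/containerd/filters"

// match parses a two-selector filter and applies it to an object
// exposed through an AdapterFunc.
func match() (bool, error) {
	f, err := filters.Parse(`name==foo,labels."com.example/tier"==web`)
	if err != nil {
		return false, err
	}
	ok := f.Match(filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		switch fieldpath[0] {
		case "name":
			return "foo", true
		case "labels":
			if len(fieldpath) > 1 && fieldpath[1] == "com.example/tier" {
				return "web", true
			}
		}
		return "", false
	}))
	return ok, nil // true: both selectors match
}
```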
- break loop - } - } - - return fs, nil -} - -func (p *parser) field() (string, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, false) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected field or quoted") -} - -func (p *parser) operator() (operator, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenOperator: - switch s { - case "==": - return operatorEqual, nil - case "!=": - return operatorNotEqual, nil - case "~=": - return operatorMatches, nil - default: - return 0, p.mkerr(pos, "unsupported operator %q", s) - } - case tokenIllegal: - return 0, p.mkerr(pos, p.scanner.err) - } - - return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) -} - -func (p *parser) value(allowAltQuotes bool) (string, error) { - pos, tok, s := p.scanner.scan() - - switch tok { - case tokenValue, tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, allowAltQuotes) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected value or quoted") -} - -func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { - if !allowAlts && s[0] != '\'' && s[0] != '"' { - return "", p.mkerr(pos, "invalid quote encountered") - } - - uq, err := unquote(s) - if err != nil { - return "", p.mkerr(pos, "unquoting failed: %v", err) - } - - return uq, nil -} - -type parseError struct { - input string - pos int - msg string -} - -func (pe parseError) Error() string { - if pe.pos < len(pe.input) { - before := pe.input[:pe.pos] - location := pe.input[pe.pos : pe.pos+1] // need to handle end - after := pe.input[pe.pos+1:] - - return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) - } - - return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) -} - -func (p *parser) mkerr(pos int, format string, args ...interface{}) error { - return errors.Wrap(parseError{ - input: p.input, - pos: pos, - msg: fmt.Sprintf(format, args...), - }, "parse error") -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/filters/quote.go b/ui/wasm/vendor/github.com/containerd/containerd/filters/quote.go deleted file mode 100644 index 2d64e23a300..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/filters/quote.go +++ /dev/null @@ -1,253 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "unicode/utf8" - - "github.com/pkg/errors" -) - -// NOTE(stevvooe): Most of this code in this file is copied from the stdlib -// strconv package and modified to be able to handle quoting with `/` and `|` -// as delimiters. The copyright is held by the Go authors. - -var errQuoteSyntax = errors.New("quote syntax error") - -// UnquoteChar decodes the first character or byte in the escaped string -// or character literal represented by the string s. 
-// It returns four values: -// -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. -// -// The second argument, quote, specifies the type of literal being parsed -// and therefore which escaped quote character is permitted. -// If set to a single quote, it permits the sequence \' and disallows unescaped '. -// If set to a double quote, it permits \" and disallows unescaped ". -// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. -// -// This is from Go strconv package, modified to support `|` and `/` as double -// quotes for use with regular expressions. -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): - err = errQuoteSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = errQuoteSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = errQuoteSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = errQuoteSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = errQuoteSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = errQuoteSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = errQuoteSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = errQuoteSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"', '|', '/': - if c != quote { - err = errQuoteSyntax - return - } - value = rune(c) - default: - err = errQuoteSyntax - return - } - tail = s - return -} - -// unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -// -// This is modified from the standard library to support `|` and `/` as quote -// characters for use with regular expressions. -func unquote(s string) (string, error) { - n := len(s) - if n < 2 { - return "", errQuoteSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", errQuoteSyntax - } - s = s[1 : n-1] - - if quote == '`' { - if contains(s, '`') { - return "", errQuoteSyntax - } - if contains(s, '\r') { - // -1 because we know there is at least one \r to remove. 
- buf := make([]byte, 0, len(s)-1) - for i := 0; i < len(s); i++ { - if s[i] != '\r' { - buf = append(buf, s[i]) - } - } - return string(buf), nil - } - return s, nil - } - if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { - return "", errQuoteSyntax - } - if contains(s, '\n') { - return "", errQuoteSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - switch quote { - case '"', '/', '|': // pipe and slash are treated like double quote - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", errQuoteSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/filters/scanner.go b/ui/wasm/vendor/github.com/containerd/containerd/filters/scanner.go deleted file mode 100644 index 6a485467b8a..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/filters/scanner.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package filters - -import ( - "unicode" - "unicode/utf8" -) - -const ( - tokenEOF = -(iota + 1) - tokenQuoted - tokenValue - tokenField - tokenSeparator - tokenOperator - tokenIllegal -) - -type token rune - -func (t token) String() string { - switch t { - case tokenEOF: - return "EOF" - case tokenQuoted: - return "Quoted" - case tokenValue: - return "Value" - case tokenField: - return "Field" - case tokenSeparator: - return "Separator" - case tokenOperator: - return "Operator" - case tokenIllegal: - return "Illegal" - } - - return string(t) -} - -func (t token) GoString() string { - return "token" + t.String() -} - -type scanner struct { - input string - pos int - ppos int // bounds the current rune in the string - value bool - err string -} - -func (s *scanner) init(input string) { - s.input = input - s.pos = 0 - s.ppos = 0 -} - -func (s *scanner) next() rune { - if s.pos >= len(s.input) { - return tokenEOF - } - s.pos = s.ppos - - r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) - s.ppos += w - if r == utf8.RuneError { - if w > 0 { - s.error("rune error") - return tokenIllegal - } - return tokenEOF - } - - if r == 0 { - s.error("unexpected null") - return tokenIllegal - } - - return r -} - -func (s *scanner) peek() rune { - pos := s.pos - ppos := s.ppos - ch := s.next() - s.pos = pos - s.ppos = ppos - return ch -} - -func (s *scanner) scan() (nextp int, tk token, text string) { - var ( - ch = s.next() - pos = s.pos - ) - -chomp: - switch { - case ch == tokenEOF: - case ch == tokenIllegal: - case isQuoteRune(ch): - if !s.scanQuoted(ch) { - return pos, tokenIllegal, s.input[pos:s.ppos] - } - return pos, tokenQuoted, s.input[pos:s.ppos] - case isSeparatorRune(ch): - s.value = false - return pos, tokenSeparator, s.input[pos:s.ppos] - case isOperatorRune(ch): - s.scanOperator() - s.value = true - return pos, tokenOperator, s.input[pos:s.ppos] - case unicode.IsSpace(ch): - // chomp - ch = s.next() - pos = s.pos - goto chomp - case s.value: - s.scanValue() - s.value = false - return pos, tokenValue, s.input[pos:s.ppos] - case isFieldRune(ch): - s.scanField() - return pos, tokenField, s.input[pos:s.ppos] - } - - return s.pos, token(ch), "" -} - -func (s *scanner) scanField() { - for { - ch := s.peek() - if !isFieldRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanOperator() { - for { - ch := s.peek() - switch ch { - case '=', '!', '~': - s.next() - default: - return - } - } -} - -func (s *scanner) scanValue() { - for { - ch := s.peek() - if !isValueRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanQuoted(quote rune) bool { - var illegal bool - ch := s.next() // read character after quote - for ch != quote { - if ch == '\n' || ch < 0 { - s.error("quoted literal not terminated") - return false - } - if ch == '\\' { - var legal bool - ch, legal = s.scanEscape(quote) - if !legal { - illegal = true - } - } else { - ch = s.next() - } - } - return !illegal -} - -func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { - ch = s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: - // nothing to do - ch = s.next() - legal = true - case '0', '1', '2', '3', '4', '5', '6', '7': - ch, legal = s.scanDigits(ch, 8, 3) - case 'x': - ch, legal = s.scanDigits(s.next(), 16, 2) - case 'u': - ch, legal = s.scanDigits(s.next(), 16, 4) - case 'U': - ch, legal = s.scanDigits(s.next(), 16, 8) - default: - s.error("illegal escape sequence") - } - return -} - -func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.error("illegal numeric escape sequence") - return ch, false - } - return ch, true -} - -func (s *scanner) error(msg string) { - if s.err == "" { - s.err = msg - } -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} - -func isFieldRune(r rune) bool { - return (r == '_' || isAlphaRune(r) || isDigitRune(r)) -} - -func isAlphaRune(r rune) bool { - return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' -} - -func isDigitRune(r rune) bool { - return r >= '0' && r <= '9' -} - -func isOperatorRune(r rune) bool { - switch r { - case '=', '!', '~': - return true - } - - return false -} - -func isQuoteRune(r rune) bool { - switch r { - case '/', '|', '"': // maybe add single quoting? - return true - } - - return false -} - -func isSeparatorRune(r rune) bool { - switch r { - case ',', '.': - return true - } - - return false -} - -func isValueRune(r rune) bool { - return r != ',' && !unicode.IsSpace(r) && - (unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsNumber(r) || - unicode.IsGraphic(r) || - unicode.IsPunct(r)) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/annotations.go b/ui/wasm/vendor/github.com/containerd/containerd/images/annotations.go deleted file mode 100644 index 47d92104cdd..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/annotations.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -const ( - // AnnotationImageName is an annotation on a Descriptor in an index.json - // containing the `Name` value as used by an `Image` struct - AnnotationImageName = "io.containerd.image.name" -) diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/diffid.go b/ui/wasm/vendor/github.com/containerd/containerd/images/diffid.go deleted file mode 100644 index 56193cc289f..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/diffid.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/archive/compression" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/labels" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -// GetDiffID gets the diff ID of the layer blob descriptor. -func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { - switch desc.MediaType { - case - // If the layer is already uncompressed, we can just return its digest - MediaTypeDockerSchema2Layer, - ocispec.MediaTypeImageLayer, - MediaTypeDockerSchema2LayerForeign, - ocispec.MediaTypeImageLayerNonDistributable: - return desc.Digest, nil - } - info, err := cs.Info(ctx, desc.Digest) - if err != nil { - return "", err - } - v, ok := info.Labels[labels.LabelUncompressed] - if ok { - // Fast path: if the image is already unpacked, we can use the label value - return digest.Parse(v) - } - // if the image is not unpacked, we may not have the label - ra, err := cs.ReaderAt(ctx, desc) - if err != nil { - return "", err - } - defer ra.Close() - r := content.NewReader(ra) - uR, err := compression.DecompressStream(r) - if err != nil { - return "", err - } - defer uR.Close() - digester := digest.Canonical.Digester() - hashW := digester.Hash() - if _, err := io.Copy(hashW, uR); err != nil { - return "", err - } - if err := ra.Close(); err != nil { - return "", err - } - digest := digester.Digest() - // memorize the computed value - if info.Labels == nil { - info.Labels = make(map[string]string) - } - info.Labels[labels.LabelUncompressed] = digest.String() - if _, err := cs.Update(ctx, info, "labels"); err != nil { - logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) - } - return digest, nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/handlers.go b/ui/wasm/vendor/github.com/containerd/containerd/images/handlers.go deleted file mode 100644 index 05a9017bc27..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/handlers.go +++ /dev/null @@ -1,288 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "fmt" - "sort" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" -) - -var ( - // ErrSkipDesc is used to skip processing of a descriptor and - // its descendants. - ErrSkipDesc = fmt.Errorf("skip descriptor") - - // ErrStopHandler is used to signify that the descriptor - // has been handled and should not be handled further. - // This applies only to a single descriptor in a handler - // chain and does not apply to descendant descriptors. 
- ErrStopHandler = fmt.Errorf("stop handler") -) - -// Handler handles image manifests -type Handler interface { - Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) -} - -// HandlerFunc function implementing the Handler interface -type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) - -// Handle image manifests -func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - return fn(ctx, desc) -} - -// Handlers returns a handler that will run the handlers in sequence. -// -// A handler may return `ErrStopHandler` to stop calling additional handlers -func Handlers(handlers ...Handler) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - var children []ocispec.Descriptor - for _, handler := range handlers { - ch, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrStopHandler) { - break - } - return nil, err - } - - children = append(children, ch...) - } - - return children, nil - } -} - -// Walk the resources of an image and call the handler for each. If the handler -// decodes the sub-resources for each image, -// -// This differs from dispatch in that each sibling resource is considered -// synchronously. -func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { - for _, desc := range descs { - - children, err := handler.Handle(ctx, desc) - if err != nil { - if errors.Is(err, ErrSkipDesc) { - continue // don't traverse the children. - } - return err - } - - if len(children) > 0 { - if err := Walk(ctx, handler, children...); err != nil { - return err - } - } - } - - return nil -} - -// Dispatch runs the provided handler for content specified by the descriptors. -// If the handler decode subresources, they will be visited, as well. -// -// Handlers for siblings are run in parallel on the provided descriptors. A -// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse -// any children. -// -// A concurrency limiter can be passed in to limit the number of concurrent -// handlers running. When limiter is nil, there is no limit. -// -// Typically, this function will be used with `FetchHandler`, often composed -// with other handlers. -// -// If any handler returns an error, the dispatch session will be canceled. -func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { - eg, ctx2 := errgroup.WithContext(ctx) - for _, desc := range descs { - desc := desc - - if limiter != nil { - if err := limiter.Acquire(ctx, 1); err != nil { - return err - } - } - - eg.Go(func() error { - desc := desc - - children, err := handler.Handle(ctx2, desc) - if limiter != nil { - limiter.Release(1) - } - if err != nil { - if errors.Is(err, ErrSkipDesc) { - return nil // don't traverse the children. - } - return err - } - - if len(children) > 0 { - return Dispatch(ctx2, handler, limiter, children...) - } - - return nil - }) - } - - return eg.Wait() -} - -// ChildrenHandler decodes well-known manifest types and returns their children. -// -// This is useful for supporting recursive fetch and other use cases where you -// want to do a full walk of resources. -// -// One can also replace this with another implementation to allow descending of -// arbitrary types. 
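// NOTE: illustrative sketch, not part of the vendored files above. It shows
// how the deleted handler helpers compose: Handlers chains HandlerFuncs, and
// Walk descends an image's descriptor tree using ChildrenHandler (defined
// just below) as the descent step. `provider` and `root` are assumed to come
// from a containerd content store; the names are placeholders.
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// walkImage prints every descriptor reachable from root, depth-first.
func walkImage(ctx context.Context, provider content.Provider, root ocispec.Descriptor) error {
	show := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		fmt.Println(desc.MediaType, desc.Digest)
		return nil, nil // contributes no extra children of its own
	})
	return images.Walk(ctx, images.Handlers(show, images.ChildrenHandler(provider)), root)
}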
-func ChildrenHandler(provider content.Provider) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - return Children(ctx, provider, desc) - } -} - -// SetChildrenLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { - return SetChildrenMappedLabels(manager, f, nil) -} - -// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on -// the children returned by the handler and passes through the children. -// Must follow a handler that returns the children to be labeled. -// The label map allows the caller to control the labels per child descriptor. -// For returned labels, the index of the child will be appended to the end -// except for the first index when the returned label does not end with '.'. -func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { - if labelMap == nil { - labelMap = ChildGCLabels - } - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - if len(children) > 0 { - var ( - info = content.Info{ - Digest: desc.Digest, - Labels: map[string]string{}, - } - fields = []string{} - keys = map[string]uint{} - ) - for _, ch := range children { - labelKeys := labelMap(ch) - for _, key := range labelKeys { - idx := keys[key] - keys[key] = idx + 1 - if idx > 0 || key[len(key)-1] == '.' { - key = fmt.Sprintf("%s%d", key, idx) - } - - info.Labels[key] = ch.Digest.String() - fields = append(fields, "labels."+key) - } - } - - _, err := manager.Update(ctx, info, fields...) - if err != nil { - return nil, err - } - } - - return children, err - } -} - -// FilterPlatforms is a handler wrapper which limits the descriptors returned -// based on matching the specified platform matcher. -func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - var descs []ocispec.Descriptor - - if m == nil { - descs = children - } else { - for _, d := range children { - if d.Platform == nil || m.Match(*d.Platform) { - descs = append(descs, d) - } - } - } - - return descs, nil - } -} - -// LimitManifests is a handler wrapper which filters the manifest descriptors -// returned using the provided platform. -// The results will be ordered according to the comparison operator and -// use the ordering in the manifests for equal matches. -// A limit of 0 or less is considered no limit. -// A not found error is returned if no manifest is matched. 
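// NOTE: illustrative sketch, not part of the vendored files above. It mirrors
// the composition the deleted image.go uses to compute an image's size:
// FilterPlatforms restricts children to one platform, and LimitManifests caps
// the number of matched manifests before Walk visits them. `provider`,
// `target`, and `platform` are assumed inputs (platform could be
// platforms.Default()).
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// packedSize sums the sizes of all components of target for one platform.
func packedSize(ctx context.Context, provider content.Provider, target ocispec.Descriptor, platform platforms.MatchComparer) (int64, error) {
	var size int64
	add := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		size += desc.Size
		return nil, nil
	})
	// Restrict descent to a single best-matching manifest before summing.
	children := images.LimitManifests(images.FilterPlatforms(images.ChildrenHandler(provider), platform), platform, 1)
	return size, images.Walk(ctx, images.Handlers(add, children), target)
}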
-func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return children, err - } - - switch desc.MediaType { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - sort.SliceStable(children, func(i, j int) bool { - if children[i].Platform == nil { - return false - } - if children[j].Platform == nil { - return true - } - return m.Less(*children[i].Platform, *children[j].Platform) - }) - - if n > 0 { - if len(children) == 0 { - return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest") - } - if len(children) > n { - children = children[:n] - } - } - default: - // only limit manifests from an index - } - return children, nil - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/image.go b/ui/wasm/vendor/github.com/containerd/containerd/images/image.go deleted file mode 100644 index 2e5cd61c9a7..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/image.go +++ /dev/null @@ -1,441 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/platforms" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Image provides the model for how containerd views container images. -type Image struct { - // Name of the image. - // - // To be pulled, it must be a reference compatible with resolvers. - // - // This field is required. - Name string - - // Labels provide runtime decoration for the image record. - // - // There is no default behavior for how these labels are propagated. They - // only decorate the static metadata object. - // - // This field is optional. - Labels map[string]string - - // Target describes the root content for this image. Typically, this is - // a manifest, index or manifest list. - Target ocispec.Descriptor - - CreatedAt, UpdatedAt time.Time -} - -// DeleteOptions provide options on image delete -type DeleteOptions struct { - Synchronous bool -} - -// DeleteOpt allows configuring a delete operation -type DeleteOpt func(context.Context, *DeleteOptions) error - -// SynchronousDelete is used to indicate that an image deletion and removal of -// the image resources should occur synchronously before returning a result. 
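// NOTE: illustrative sketch, not part of the vendored files above. The Store
// interface and SynchronousDelete option defined just below are used like
// this; `store` is an assumed images.Store implementation (for example, one
// obtained from a containerd client).
package example

import (
	"context"

	"github.com/containerd/containerd/images"
)

// deleteImage removes an image record and blocks until its resources have
// actually been cleaned up, rather than returning immediately.
func deleteImage(ctx context.Context, store images.Store, name string) error {
	return store.Delete(ctx, name, images.SynchronousDelete())
}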
-func SynchronousDelete() DeleteOpt { - return func(ctx context.Context, o *DeleteOptions) error { - o.Synchronous = true - return nil - } -} - -// Store and interact with images -type Store interface { - Get(ctx context.Context, name string) (Image, error) - List(ctx context.Context, filters ...string) ([]Image, error) - Create(ctx context.Context, image Image) (Image, error) - - // Update will replace the data in the store with the provided image. If - // one or more fieldpaths are provided, only those fields will be updated. - Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) - - Delete(ctx context.Context, name string, opts ...DeleteOpt) error -} - -// TODO(stevvooe): Many of these functions make strong platform assumptions, -// which are untrue in a lot of cases. More refactoring must be done here to -// make this work in all cases. - -// Config resolves the image configuration descriptor. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. -func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - return Config(ctx, provider, image.Target, platform) -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { - desc, err := image.Config(ctx, provider, platform) - if err != nil { - return nil, err - } - return RootFS(ctx, provider, desc) -} - -// Size returns the total size of an image's packed resources. -func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { - var size int64 - return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Size < 0 { - return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) - } - size += desc.Size - return nil, nil - }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) -} - -type platformManifest struct { - p *ocispec.Platform - m *ocispec.Manifest -} - -// Manifest resolves a manifest from the image for the given platform. -// -// When a manifest descriptor inside of a manifest index does not have -// a platform defined, the platform from the image config is considered. -// -// If the descriptor points to a non-index manifest, then the manifest is -// unmarshalled and returned without considering the platform inside of the -// config. -// -// TODO(stevvooe): This violates the current platform agnostic approach to this -// package by returning a specific manifest type. 
We'll need to refactor this -// to return a manifest descriptor or decide that we want to bring the API in -// this direction because this abstraction is not needed.` -func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { - var ( - limit = 1 - m []platformManifest - wasIndex bool - ) - - if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) - } - - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - if desc.Digest != image.Digest && platform != nil { - if desc.Platform != nil && !platform.Match(*desc.Platform) { - return nil, nil - } - - if desc.Platform == nil { - p, err := content.ReadBlob(ctx, provider, manifest.Config) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { - return nil, nil - } - - } - } - - m = append(m, platformManifest{ - p: desc.Platform, - m: &manifest, - }) - - return nil, nil - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) - } - - var idx ocispec.Index - if err := json.Unmarshal(p, &idx); err != nil { - return nil, err - } - - if platform == nil { - return idx.Manifests, nil - } - - var descs []ocispec.Descriptor - for _, d := range idx.Manifests { - if d.Platform == nil || platform.Match(*d.Platform) { - descs = append(descs, d) - } - } - - sort.SliceStable(descs, func(i, j int) bool { - if descs[i].Platform == nil { - return false - } - if descs[j].Platform == nil { - return true - } - return platform.Less(*descs[i].Platform, *descs[j].Platform) - }) - - wasIndex = true - - if len(descs) > limit { - return descs[:limit], nil - } - return descs, nil - } - return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) - }), image); err != nil { - return ocispec.Manifest{}, err - } - - if len(m) == 0 { - err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest) - if wasIndex { - err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest) - } - return ocispec.Manifest{}, err - } - return *m[0].m, nil -} - -// Config resolves the image configuration descriptor using a content provided -// to resolve child resources on the image. -// -// The caller can then use the descriptor to resolve and process the -// configuration of the image. -func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { - manifest, err := Manifest(ctx, provider, image, platform) - if err != nil { - return ocispec.Descriptor{}, err - } - return manifest.Config, err -} - -// Platforms returns one or more platforms supported by the image. 
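// NOTE: illustrative sketch, not part of the vendored files above. It shows
// the Manifest/Config helpers in use: Config resolves the best-matching
// manifest for the host platform and returns its config descriptor.
// `provider` and `target` are assumed inputs.
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// configOf returns the config descriptor of target for the host platform.
func configOf(ctx context.Context, provider content.Provider, target ocispec.Descriptor) (ocispec.Descriptor, error) {
	return images.Config(ctx, provider, target, platforms.Default())
}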
-func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { - var platformSpecs []ocispec.Platform - return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Platform != nil { - platformSpecs = append(platformSpecs, *desc.Platform) - return nil, ErrSkipDesc - } - - switch desc.MediaType { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - var image ocispec.Image - if err := json.Unmarshal(p, &image); err != nil { - return nil, err - } - - platformSpecs = append(platformSpecs, - platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) - } - return nil, nil - }), ChildrenHandler(provider)), image) -} - -// Check returns nil if the all components of an image are available in the -// provider for the specified platform. -// -// If available is true, the caller can assume that required represents the -// complete set of content required for the image. -// -// missing will have the components that are part of required but not available -// in the provider. -// -// If there is a problem resolving content, an error will be returned. -func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { - mfst, err := Manifest(ctx, provider, image, platform) - if err != nil { - if errdefs.IsNotFound(err) { - return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil - } - - return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest) - } - - // TODO(stevvooe): It is possible that referenced conponents could have - // children, but this is rare. For now, we ignore this and only verify - // that manifest components are present. - required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) - - for _, desc := range required { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - if errdefs.IsNotFound(err) { - missing = append(missing, desc) - continue - } else { - return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest) - } - } - ra.Close() - present = append(present, desc) - - } - - return true, required, present, missing, nil -} - -// Children returns the immediate children of content described by the descriptor. -func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var descs []ocispec.Descriptor - switch desc.MediaType { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) - } - - // TODO(stevvooe): We just assume oci manifest, for now. There may be - // subtle differences from the docker version. - var manifest ocispec.Manifest - if err := json.Unmarshal(p, &manifest); err != nil { - return nil, err - } - - descs = append(descs, manifest.Config) - descs = append(descs, manifest.Layers...) 
- case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - p, err := content.ReadBlob(ctx, provider, desc) - if err != nil { - return nil, err - } - - if err := validateMediaType(p, desc.MediaType); err != nil { - return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) - } - - var index ocispec.Index - if err := json.Unmarshal(p, &index); err != nil { - return nil, err - } - - descs = append(descs, index.Manifests...) - default: - if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { - // childless data types. - return nil, nil - } - log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) - } - - return descs, nil -} - -// unknownDocument represents a manifest, manifest list, or index that has not -// yet been validated. -type unknownDocument struct { - MediaType string `json:"mediaType,omitempty"` - Config json.RawMessage `json:"config,omitempty"` - Layers json.RawMessage `json:"layers,omitempty"` - Manifests json.RawMessage `json:"manifests,omitempty"` - FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1 -} - -// validateMediaType returns an error if the byte slice is invalid JSON or if -// the media type identifies the blob as one format but it contains elements of -// another format. -func validateMediaType(b []byte, mt string) error { - var doc unknownDocument - if err := json.Unmarshal(b, &doc); err != nil { - return err - } - if len(doc.FSLayers) != 0 { - return fmt.Errorf("media-type: schema 1 not supported") - } - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - if len(doc.Manifests) != 0 || - doc.MediaType == MediaTypeDockerSchema2ManifestList || - doc.MediaType == ocispec.MediaTypeImageIndex { - return fmt.Errorf("media-type: expected manifest but found index (%s)", mt) - } - case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - if len(doc.Config) != 0 || len(doc.Layers) != 0 || - doc.MediaType == MediaTypeDockerSchema2Manifest || - doc.MediaType == ocispec.MediaTypeImageManifest { - return fmt.Errorf("media-type: expected index but found manifest (%s)", mt) - } - } - return nil -} - -// RootFS returns the unpacked diffids that make up and images rootfs. -// -// These are used to verify that a set of layers unpacked to the expected -// values. -func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { - p, err := content.ReadBlob(ctx, provider, configDesc) - if err != nil { - return nil, err - } - - var config ocispec.Image - if err := json.Unmarshal(p, &config); err != nil { - return nil, err - } - return config.RootFS.DiffIDs, nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/importexport.go b/ui/wasm/vendor/github.com/containerd/containerd/images/importexport.go deleted file mode 100644 index 843adcadc7e..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/importexport.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Importer is the interface for image importer. -type Importer interface { - // Import imports an image from a tar stream. - Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) -} - -// Exporter is the interface for image exporter. -type Exporter interface { - // Export exports an image to a tar stream. - Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/images/mediatypes.go b/ui/wasm/vendor/github.com/containerd/containerd/images/mediatypes.go deleted file mode 100644 index 785d71291e2..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/images/mediatypes.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package images - -import ( - "context" - "sort" - "strings" - - "github.com/containerd/containerd/errdefs" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// mediatype definitions for image components handled in containerd. -// -// oci components are generally referenced directly, although we may centralize -// here for clarity. 
-const ( - MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" - MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" - MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" - MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" - MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" - MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" - MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" - // Checkpoint/Restore Media Types - MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" - MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" - MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" - MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" - MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" - MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" - MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" - MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" - // Legacy Docker schema1 manifest - MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // Encypted media types - MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" - MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" -) - -// DiffCompression returns the compression as defined by the layer diff media -// type. For Docker media types without compression, "unknown" is returned to -// indicate that the media type may be compressed. If the media type is not -// recognized as a layer diff, then it returns errdefs.ErrNotImplemented -func DiffCompression(ctx context.Context, mediaType string) (string, error) { - base, ext := parseMediaTypes(mediaType) - switch base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - // These media types may have been compressed but failed to - // use the correct media type. The decompression function - // should detect and handle this case. - return "unknown", nil - case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: - if len(ext) > 0 { - // Type is wrapped - return "", nil - } - return "gzip", nil - case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: - if len(ext) > 0 { - switch ext[len(ext)-1] { - case "gzip": - return "gzip", nil - case "zstd": - return "zstd", nil - } - } - return "", nil - default: - return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType) - } -} - -// parseMediaTypes splits the media type into the base type and -// an array of sorted extensions -func parseMediaTypes(mt string) (string, []string) { - if mt == "" { - return "", []string{} - } - - s := strings.Split(mt, "+") - ext := s[1:] - sort.Strings(ext) - - return s[0], ext -} - -// IsNonDistributable returns true if the media type is non-distributable. 
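// NOTE: illustrative sketch, not part of the vendored files above. It shows
// the media-type helpers in this file in use: DiffCompression maps a layer
// media type to its compression name, reporting "unknown" for Docker layer
// types that carry no compression suffix but may still be compressed.
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/images"
)

// describeLayer prints the compression implied by a layer media type.
func describeLayer(ctx context.Context, mediaType string) {
	if !images.IsLayerType(mediaType) {
		fmt.Println("not a layer:", mediaType)
		return
	}
	compression, err := images.DiffCompression(ctx, mediaType)
	if err != nil {
		fmt.Println("unrecognized layer media type:", err)
		return
	}
	fmt.Printf("%s -> compression %q\n", mediaType, compression)
}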
-func IsNonDistributable(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || - strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") -} - -// IsLayerType returns true if the media type is a layer -func IsLayerType(mt string) bool { - if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { - return true - } - - // Parse Docker media types, strip off any + suffixes first - base, _ := parseMediaTypes(mt) - switch base { - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: - return true - } - return false -} - -// IsDockerType returns true if the media type has "application/vnd.docker." prefix -func IsDockerType(mt string) bool { - return strings.HasPrefix(mt, "application/vnd.docker.") -} - -// IsManifestType returns true if the media type is an OCI-compatible manifest. -// No support for schema1 manifest. -func IsManifestType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return true - default: - return false - } -} - -// IsIndexType returns true if the media type is an OCI-compatible index. -func IsIndexType(mt string) bool { - switch mt { - case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: - return true - default: - return false - } -} - -// IsConfigType returns true if the media type is an OCI-compatible image config. -// No support for containerd checkpoint configs. -func IsConfigType(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - return true - default: - return false - } -} - -// IsKnownConfig returns true if the media type is a known config type, -// including containerd checkpoint configs -func IsKnownConfig(mt string) bool { - switch mt { - case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: - return true - } - return false -} - -// ChildGCLabels returns the label for a given descriptor to reference it -func ChildGCLabels(desc ocispec.Descriptor) []string { - mt := desc.MediaType - if IsKnownConfig(mt) { - return []string{"containerd.io/gc.ref.content.config"} - } - - switch mt { - case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - return []string{"containerd.io/gc.ref.content.m."} - } - - if IsLayerType(mt) { - return []string{"containerd.io/gc.ref.content.l."} - } - - return []string{"containerd.io/gc.ref.content."} -} - -// ChildGCLabelsFilterLayers returns the labels for a given descriptor to -// reference it, skipping layer media types -func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { - if IsLayerType(desc.MediaType) { - return nil - } - return ChildGCLabels(desc) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/labels/labels.go b/ui/wasm/vendor/github.com/containerd/containerd/labels/labels.go deleted file mode 100644 index d76ff2cf9c9..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/labels/labels.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -// LabelUncompressed is added to compressed layer contents. -// The value is digest of the uncompressed content. -const LabelUncompressed = "containerd.io/uncompressed" diff --git a/ui/wasm/vendor/github.com/containerd/containerd/labels/validate.go b/ui/wasm/vendor/github.com/containerd/containerd/labels/validate.go deleted file mode 100644 index 0de461663aa..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/labels/validate.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package labels - -import ( - "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" -) - -const ( - maxSize = 4096 -) - -// Validate a label's key and value are under 4096 bytes -func Validate(k, v string) error { - if (len(k) + len(v)) > maxSize { - if len(k) > 10 { - k = k[:10] - } - return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/log/context.go b/ui/wasm/vendor/github.com/containerd/containerd/log/context.go deleted file mode 100644 index 37b6a7d1c8a..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/log/context.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package log - -import ( - "context" - - "github.com/sirupsen/logrus" -) - -var ( - // G is an alias for GetLogger. - // - // We may want to define this locally to a package to get package tagged log - // messages. - G = GetLogger - - // L is an alias for the standard logger. - L = logrus.NewEntry(logrus.StandardLogger()) -) - -type ( - loggerKey struct{} -) - -const ( - // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to - // ensure the formatted time is always the same number of characters. 
- RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - - // TextFormat represents the text logging format - TextFormat = "text" - - // JSONFormat represents the JSON logging format - JSONFormat = "json" -) - -// WithLogger returns a new context with the provided logger. Use in -// combination with logger.WithField(s) for great effect. -func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { - return context.WithValue(ctx, loggerKey{}, logger) -} - -// GetLogger retrieves the current logger from the context. If no logger is -// available, the default logger is returned. -func GetLogger(ctx context.Context) *logrus.Entry { - logger := ctx.Value(loggerKey{}) - - if logger == nil { - return L - } - - return logger.(*logrus.Entry) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/compare.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/compare.go deleted file mode 100644 index c7657e1869b..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/compare.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "strconv" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// MatchComparer is able to match and compare platforms to -// filter and sort platforms. -type MatchComparer interface { - Matcher - - Less(specs.Platform, specs.Platform) bool -} - -// platformVector returns an (ordered) vector of appropriate specs.Platform -// objects to try matching for the given platform object (see platforms.Only). -func platformVector(platform specs.Platform) []specs.Platform { - vector := []specs.Platform{platform} - - switch platform.Architecture { - case "amd64": - vector = append(vector, specs.Platform{ - Architecture: "386", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: platform.Variant, - }) - case "arm": - if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { - for armVersion--; armVersion >= 5; armVersion-- { - vector = append(vector, specs.Platform{ - Architecture: platform.Architecture, - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: "v" + strconv.Itoa(armVersion), - }) - } - } - case "arm64": - variant := platform.Variant - if variant == "" { - variant = "v8" - } - vector = append(vector, platformVector(specs.Platform{ - Architecture: "arm", - OS: platform.OS, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - Variant: variant, - })...) - } - - return vector -} - -// Only returns a match comparer for a single platform -// using default resolution logic for the platform. 
-// -// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 -// For arm/v7, will also match arm/v6 and arm/v5 -// For arm/v6, will also match arm/v5 -// For amd64, will also match 386 -func Only(platform specs.Platform) MatchComparer { - return Ordered(platformVector(Normalize(platform))...) -} - -// OnlyStrict returns a match comparer for a single platform. -// -// Unlike Only, OnlyStrict does not match sub platforms. -// So, "arm/vN" will not match "arm/vM" where M < N, -// and "amd64" will not also match "386". -// -// OnlyStrict matches non-canonical forms. -// So, "arm64" matches "arm/64/v8". -func OnlyStrict(platform specs.Platform) MatchComparer { - return Ordered(Normalize(platform)) -} - -// Ordered returns a platform MatchComparer which matches any of the platforms -// but orders them in order they are provided. -func Ordered(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return orderedPlatformComparer{ - matchers: matchers, - } -} - -// Any returns a platform MatchComparer which matches any of the platforms -// with no preference for ordering. -func Any(platforms ...specs.Platform) MatchComparer { - matchers := make([]Matcher, len(platforms)) - for i := range platforms { - matchers[i] = NewMatcher(platforms[i]) - } - return anyPlatformComparer{ - matchers: matchers, - } -} - -// All is a platform MatchComparer which matches all platforms -// with preference for ordering. -var All MatchComparer = allPlatformComparer{} - -type orderedPlatformComparer struct { - matchers []Matcher -} - -func (c orderedPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { - for _, m := range c.matchers { - p1m := m.Match(p1) - p2m := m.Match(p2) - if p1m && !p2m { - return true - } - if p1m || p2m { - return false - } - } - return false -} - -type anyPlatformComparer struct { - matchers []Matcher -} - -func (c anyPlatformComparer) Match(platform specs.Platform) bool { - for _, m := range c.matchers { - if m.Match(platform) { - return true - } - } - return false -} - -func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { - var p1m, p2m bool - for _, m := range c.matchers { - if !p1m && m.Match(p1) { - p1m = true - } - if !p2m && m.Match(p2) { - p2m = true - } - if p1m && p2m { - return false - } - } - // If one matches, and the other does, sort match first - return p1m && !p2m -} - -type allPlatformComparer struct{} - -func (allPlatformComparer) Match(specs.Platform) bool { - return true -} - -func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { - return false -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/cpuinfo.go deleted file mode 100644 index 4a7177e3138..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "bufio" - "os" - "runtime" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/pkg/errors" -) - -// Present the ARM instruction set architecture, eg: v7, v8 -// Don't use this value directly; call cpuVariant() instead. -var cpuVariantValue string - -var cpuVariantOnce sync.Once - -func cpuVariant() string { - cpuVariantOnce.Do(func() { - if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() - } - }) - return cpuVariantValue -} - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. - scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/database.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/database.go deleted file mode 100644 index 6ede94061eb..00000000000 --- 
a/ui/wasm/vendor/github.com/containerd/containerd/platforms/database.go +++ /dev/null @@ -1,114 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - "strings" -) - -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - -// These function are generated from https://golang.org/src/go/build/syslist.go. -// -// We use switch statements because they are slightly faster than map lookups -// and use a little less memory. - -// isKnownOS returns true if we know about the operating system. -// -// The OS value should be normalized before calling this function. -func isKnownOS(os string) bool { - switch os { - case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": - return true - } - return false -} - -// isArmArch returns true if the architecture is ARM. -// -// The arch value should be normalized before being passed to this function. -func isArmArch(arch string) bool { - switch arch { - case "arm", "arm64": - return true - } - return false -} - -// isKnownArch returns true if we know about the architecture. -// -// The arch value should be normalized before being passed to this function. -func isKnownArch(arch string) bool { - switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": - return true - } - return false -} - -func normalizeOS(os string) string { - if os == "" { - return runtime.GOOS - } - os = strings.ToLower(os) - - switch os { - case "macos": - os = "darwin" - } - return os -} - -// normalizeArch normalizes the architecture. -func normalizeArch(arch, variant string) (string, string) { - arch, variant = strings.ToLower(arch), strings.ToLower(variant) - switch arch { - case "i386": - arch = "386" - variant = "" - case "x86_64", "x86-64": - arch = "amd64" - variant = "" - case "aarch64", "arm64": - arch = "arm64" - switch variant { - case "8", "v8": - variant = "" - } - case "armhf": - arch = "arm" - variant = "v7" - case "armel": - arch = "arm" - variant = "v6" - case "arm": - switch variant { - case "", "7": - variant = "v7" - case "5", "6", "8": - variant = "v" + variant - } - } - - return arch, variant -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults.go deleted file mode 100644 index cb77fbc9f7b..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -import ( - "runtime" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// DefaultString returns the default string specifier for the platform. -func DefaultString() string { - return Format(DefaultSpec()) -} - -// DefaultSpec returns the current platform's default platform specification. -func DefaultSpec() specs.Platform { - return specs.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - // The Variant field will be empty if arch != ARM. - Variant: cpuVariant(), - } -} - -// DefaultStrict returns strict form of Default. -func DefaultStrict() MatchComparer { - return OnlyStrict(DefaultSpec()) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_unix.go deleted file mode 100644 index e8a7d5ffa0d..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package platforms - -// Default returns the default matcher for the platform. -func Default() MatchComparer { - return Only(DefaultSpec()) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_windows.go deleted file mode 100644 index 0c380e3b7c1..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package platforms - -import ( - "fmt" - "runtime" - "strconv" - "strings" - - imagespec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sys/windows" -) - -type matchComparer struct { - defaults Matcher - osVersionPrefix string -} - -// Match matches platform with the same windows major, minor -// and build version. -func (m matchComparer) Match(p imagespec.Platform) bool { - if m.defaults.Match(p) { - // TODO(windows): Figure out whether OSVersion is deprecated. - return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) - } - return false -} - -// Less sorts matched platforms in front of other platforms. -// For matched platforms, it puts platforms with larger revision -// number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { - m1, m2 := m.Match(p1), m.Match(p2) - if m1 && m2 { - r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) - return r1 > r2 - } - return m1 && !m2 -} - -func revision(v string) int { - parts := strings.Split(v, ".") - if len(parts) < 4 { - return 0 - } - r, err := strconv.Atoi(parts[3]) - if err != nil { - return 0 - } - return r -} - -// Default returns the current platform's default platform specification. -func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/platforms/platforms.go b/ui/wasm/vendor/github.com/containerd/containerd/platforms/platforms.go deleted file mode 100644 index 088bdea0508..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/platforms/platforms.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package platforms provides a toolkit for normalizing, matching and -// specifying container platforms. -// -// Centered around OCI platform specifications, we define a string-based -// specifier syntax that can be used for user input. With a specifier, users -// only need to specify the parts of the platform that are relevant to their -// context, providing an operating system or architecture or both. -// -// How do I use this package? -// -// The vast majority of use cases should simply use the match function with -// user input. The first step is to parse a specifier into a matcher: -// -// m, err := Parse("linux") -// if err != nil { ... } -// -// Once you have a matcher, use it to match against the platform declared by a -// component, typically from an image or runtime. Since extracting an images -// platform is a little more involved, we'll use an example against the -// platform default: -// -// if ok := m.Match(Default()); !ok { /* doesn't match */ } -// -// This can be composed in loops for resolving runtimes or used as a filter for -// fetch and select images. 
-// -// More details of the specifier syntax and platform spec follow. -// -// Declaring Platform Support -// -// Components that have strict platform requirements should use the OCI -// platform specification to declare their support. Typically, these will be -// images and runtimes that should declare specifically which platforms they -// support. This looks roughly as follows: -// -// type Platform struct { -// Architecture string -// OS string -// Variant string -// } -// -// Most images and runtimes should at least set Architecture and OS, according -// to their GOARCH and GOOS values, respectively (follow the OCI image -// specification when in doubt). ARM should set the variant in certain cases, -// which are outlined below. -// -// Platform Specifiers -// -// While the OCI platform specifications provide a tool for components to -// specify structured information, user input typically doesn't need the full -// context and much can be inferred. To solve this problem, we introduced -// "specifiers". A specifier has the format -// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the -// operating system or the architecture or both. -// -// An example of a common specifier is `linux/amd64`. If the host has a default -// runtime that matches this, the user can simply provide the component that -// matters. For example, if an image provides amd64 and arm64 support, the -// operating system, `linux`, can be inferred, so they only have to provide -// `arm64` or `amd64`. Similar behavior is implemented for operating systems, -// where the architecture may be known but a runtime may support images from -// different operating systems. -// -// Normalization -// -// Because not all users are familiar with the way the Go runtime represents -// platforms, several normalizations have been provided to make this package -// easier to use. -// -// The following are performed for architectures: -// -// Value Normalized -// aarch64 arm64 -// armhf arm -// armel arm/v6 -// i386 386 -// x86_64 amd64 -// x86-64 amd64 -// -// We also normalize the operating system `macos` to `darwin`. -// -// ARM Support -// -// To qualify ARM architecture, the Variant field is used to qualify the arm -// version. The most common arm version, v7, is represented without the variant -// unless it is explicitly provided. This is treated as equivalent to armhf. A -// previous architecture, armel, will be normalized to arm/v6. -// -// While these normalizations are provided, their support on arm platforms has -// not yet been fully implemented and tested. -package platforms - -import ( - "regexp" - "runtime" - "strconv" - "strings" - - "github.com/containerd/containerd/errdefs" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var ( - specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) -) - -// Matcher matches platform specifications, provided by an image or runtime. -type Matcher interface { - Match(platform specs.Platform) bool -} - -// NewMatcher returns a simple matcher based on the provided platform -// specification. The returned matcher only looks for equality based on os, -// architecture and variant. -// -// One may implement their own matcher if this doesn't provide the required -// functionality. -// -// Applications should opt to use `Match` over directly parsing specifiers.
-func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } -} - -type matcher struct { - specs.Platform -} - -func (m *matcher) Match(platform specs.Platform) bool { - normalized := Normalize(platform) - return m.OS == normalized.OS && - m.Architecture == normalized.Architecture && - m.Variant == normalized.Variant -} - -func (m *matcher) String() string { - return Format(m.Platform) -} - -// Parse parses the platform specifier syntax into a platform declaration. -// -// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`. -// The minimum required information for a platform specifier is the operating -// system or architecture. If there is only a single string (no slashes), the -// value will be matched against the known set of operating systems, then fall -// back to the known set of architectures. The missing component will be -// inferred based on the local environment. -func Parse(specifier string) (specs.Platform, error) { - if strings.Contains(specifier, "*") { - // TODO(stevvooe): need to work out exact wildcard handling - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier) - } - - parts := strings.Split(specifier, "/") - - for _, part := range parts { - if !specifierRe.MatchString(part) { - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String()) - } - } - - var p specs.Platform - switch len(parts) { - case 1: - // in this case, we will test that the value might be an OS, then look - // it up. If it is not known, we'll treat it as an architecture. Since - // we have very little information about the platform here, we are - // going to be a little more strict if we don't know about the argument - // value. - p.OS = normalizeOS(parts[0]) - if isKnownOS(p.OS) { - // picks a default architecture - p.Architecture = runtime.GOARCH - if p.Architecture == "arm" && cpuVariant() != "v7" { - p.Variant = cpuVariant() - } - - return p, nil - } - - p.Architecture, p.Variant = normalizeArch(parts[0], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - if isKnownArch(p.Architecture) { - p.OS = runtime.GOOS - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier) - case 2: - // In this case, we treat it as a regular os/arch pair. We don't care - // about whether or not we know of the platform. - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], "") - if p.Architecture == "arm" && p.Variant == "v7" { - p.Variant = "" - } - - return p, nil - case 3: - // we have a fully specified variant, this is rare - p.OS = normalizeOS(parts[0]) - p.Architecture, p.Variant = normalizeArch(parts[1], parts[2]) - if p.Architecture == "arm64" && p.Variant == "" { - p.Variant = "v8" - } - - return p, nil - } - - return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier) -} - -// MustParse is like Parse but panics if the specifier cannot be parsed. -// Simplifies initialization of global variables. -func MustParse(specifier string) specs.Platform { - p, err := Parse(specifier) - if err != nil { - panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) - } - return p -} - -// Format returns a string specifier from the provided platform specification.
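A small sketch of the normalization that Parse implements, assuming the vendored package as-is (Format is defined just below; the inline comments show the expected output):

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Aliases normalize to canonical values: "macos" becomes "darwin",
	// "x86_64" becomes "amd64".
	p := platforms.MustParse("macos/x86_64")
	fmt.Println(p.OS, p.Architecture) // darwin amd64
	fmt.Println(platforms.Format(p))  // darwin/amd64
}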
-func Format(platform specs.Platform) string { - if platform.OS == "" { - return "unknown" - } - - return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) -} - -func joinNotEmpty(s ...string) string { - var ss []string - for _, s := range s { - if s == "" { - continue - } - - ss = append(ss, s) - } - - return strings.Join(ss, "/") -} - -// Normalize validates and translates the platform to the canonical value. -// -// For example, if "Aarch64" is encountered, we change it to "arm64" or if -// "x86_64" is encountered, it becomes "amd64". -func Normalize(platform specs.Platform) specs.Platform { - platform.OS = normalizeOS(platform.OS) - platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) - - // these fields are deprecated, remove them - platform.OSFeatures = nil - platform.OSVersion = "" - - return platform -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/reference/reference.go b/ui/wasm/vendor/github.com/containerd/containerd/reference/reference.go deleted file mode 100644 index a4bf6da6019..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/reference/reference.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package reference - -import ( - "errors" - "fmt" - "net/url" - "path" - "regexp" - "strings" - - digest "github.com/opencontainers/go-digest" -) - -var ( - // ErrInvalid is returned when there is an invalid reference - ErrInvalid = errors.New("invalid reference") - // ErrObjectRequired is returned when the object is required - ErrObjectRequired = errors.New("object required") - // ErrHostnameRequired is returned when the hostname is required - ErrHostnameRequired = errors.New("hostname required") -) - -// Spec defines the main components of a reference specification. -// -// A reference specification is a schema-less URI parsed into common -// components. The two main components, locator and object, are required to be -// supported by remotes. It represents a superset of the naming defined in -// docker's reference schema. It aims to be compatible but not prescriptive. -// -// While the interpretation of the components, locator and object, are up to -// the remote, we define a few common parts, accessible via helper methods. -// -// The first is the hostname, which is part of the locator. This doesn't need -// to map to a physical resource, but it must parse as a hostname. We refer to -// this as the namespace. -// -// The other component made accessible by helper method is the digest. This is -// part of the object identifier, always prefixed with an '@'. If present, the -// remote may use the digest portion directly or resolve it against a prefix. -// If the object does not include the `@` symbol, the return value for `Digest` -// will be empty. -type Spec struct { - // Locator is the host and path portion of the specification. The host - // portion may refer to an actual host or just a namespace of related - // images.
- // - // Typically, the locator may be used to resolve the remote to fetch specific - // resources. - Locator string - - // Object contains the identifier for the remote resource. Classically, - // this is a tag but can refer to anything in a remote. By convention, any - // portion that may be a partial or whole digest will be preceded by an - // `@`. Anything preceding the `@` will be referred to as the "tag". - // - // In practice, we will see this broken down into the following formats: - // - // 1. <tag> - // 2. <tag>@<digest spec> - // 3. @<digest spec> - // - // We define the tag to be anything except '@' and ':'. <digest spec> may - // be a full valid digest or shortened version, possibly with elided - // algorithm. - Object string -} - -var splitRe = regexp.MustCompile(`[:@]`) - -// Parse parses the string into a structured ref. -func Parse(s string) (Spec, error) { - if strings.Contains(s, "://") { - return Spec{}, ErrInvalid - } - - u, err := url.Parse("dummy://" + s) - if err != nil { - return Spec{}, err - } - - if u.Scheme != "dummy" { - return Spec{}, ErrInvalid - } - - if u.Host == "" { - return Spec{}, ErrHostnameRequired - } - - var object string - - if idx := splitRe.FindStringIndex(u.Path); idx != nil { - // This allows us to retain the @ to signify digests or shortened digests in - // the object. - object = u.Path[idx[0]:] - if object[:1] == ":" { - object = object[1:] - } - u.Path = u.Path[:idx[0]] - } - - return Spec{ - Locator: path.Join(u.Host, u.Path), - Object: object, - }, nil -} - -// Hostname returns the hostname portion of the locator. -// -// Remotes are not required to directly access the resources at this host. This -// method is provided for convenience. -func (r Spec) Hostname() string { - i := strings.Index(r.Locator, "/") - - if i < 0 { - return r.Locator - } - return r.Locator[:i] -} - -// Digest returns the digest portion of the reference spec. This may be a -// partial or invalid digest, which may be used to look up a complete digest. -func (r Spec) Digest() digest.Digest { - _, dgst := SplitObject(r.Object) - return dgst -} - -// String returns the normalized string for the ref. -func (r Spec) String() string { - if r.Object == "" { - return r.Locator - } - if r.Object[:1] == "@" { - return fmt.Sprintf("%v%v", r.Locator, r.Object) - } - - return fmt.Sprintf("%v:%v", r.Locator, r.Object) -} - -// SplitObject provides two parts of the object spec, delimited by an `@` -// symbol. -// -// Either may be empty and it is the caller's job to validate them -// appropriately. -func SplitObject(obj string) (tag string, dgst digest.Digest) { - parts := strings.SplitAfterN(obj, "@", 2) - if len(parts) < 2 { - return parts[0], "" - } - return parts[0], digest.Digest(parts[1]) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go deleted file mode 100644 index 8b0a87e755f..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
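A usage sketch for the reference package deleted above; the digest is a placeholder value, and the inline comments show what Parse is expected to yield:

package main

import (
	"fmt"

	"github.com/containerd/containerd/reference"
)

func main() {
	ref := "docker.io/library/alpine:3.18@sha256:0000000000000000000000000000000000000000000000000000000000000000"

	spec, err := reference.Parse(ref)
	if err != nil {
		panic(err)
	}

	fmt.Println(spec.Locator)    // docker.io/library/alpine
	fmt.Println(spec.Object)     // 3.18@sha256:00...00
	fmt.Println(spec.Hostname()) // docker.io
	fmt.Println(spec.Digest())   // sha256:00...00
}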
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package auth - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strings" - "time" - - "github.com/containerd/containerd/log" - remoteserrors "github.com/containerd/containerd/remotes/errors" - "github.com/containerd/containerd/version" - "github.com/pkg/errors" - "golang.org/x/net/context/ctxhttp" -) - -var ( - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -// GenerateTokenOptions generates options for fetching a token based on a challenge -func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { - realm, ok := c.Parameters["realm"] - if !ok { - return TokenOptions{}, errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") - } - - to := TokenOptions{ - Realm: realmURL.String(), - Service: c.Parameters["service"], - Username: username, - Secret: secret, - } - - scope, ok := c.Parameters["scope"] - if ok { - to.Scopes = append(to.Scopes, scope) - } else { - log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") - } - - return to, nil -} - -// TokenOptions are options for requesting a token -type TokenOptions struct { - Realm string - Service string - Scopes []string - Username string - Secret string -} - -// OAuthTokenResponse is response from fetching token with a OAuth POST request -type OAuthTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -// FetchTokenWithOAuth fetches a token using a POST request -func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { - form := url.Values{} - if len(to.Scopes) > 0 { - form.Set("scope", strings.Join(to.Scopes, " ")) - } - form.Set("service", to.Service) - form.Set("client_id", clientID) - - if to.Username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", to.Secret) - } else { - form.Set("grant_type", "password") - form.Set("username", to.Username) - form.Set("password", to.Secret) - } - - req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) 
- } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) - } - - decoder := json.NewDecoder(resp.Body) - - var tr OAuthTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") - } - - if tr.AccessToken == "" { - return nil, errors.WithStack(ErrNoToken) - } - - return &tr, nil -} - -// FetchTokenResponse is response from fetching token with GET request -type FetchTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// FetchToken fetches a token using a GET request -func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { - req, err := http.NewRequest("GET", to.Realm, nil) - if err != nil { - return nil, err - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) - } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - reqParams := req.URL.Query() - - if to.Service != "" { - reqParams.Add("service", to.Service) - } - - for _, scope := range to.Scopes { - reqParams.Add("scope", scope) - } - - if to.Secret != "" { - req.SetBasicAuth(to.Username, to.Secret) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) - } - - decoder := json.NewDecoder(resp.Body) - - var tr FetchTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return nil, errors.WithStack(ErrNoToken) - } - - return &tr, nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go deleted file mode 100644 index 223fa2d0524..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
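To make the token flow above concrete, a hedged sketch of fetching an anonymous pull token with FetchToken; the realm, service, and scope are Docker Hub's well-known values, used purely as an example:

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker/auth"
)

func main() {
	// Values a registry would normally advertise in its WWW-Authenticate
	// challenge (example values for Docker Hub).
	to := auth.TokenOptions{
		Realm:   "https://auth.docker.io/token",
		Service: "registry.docker.io",
		Scopes:  []string{"repository:library/alpine:pull"},
	}

	// Anonymous GET: no username/secret set, so no basic auth is sent.
	tr, err := auth.FetchToken(context.Background(), http.DefaultClient, http.Header{}, to)
	if err != nil {
		panic(err)
	}
	fmt.Println("token length:", len(tr.Token))
}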
-*/ - -package auth - -import ( - "net/http" - "sort" - "strings" -) - -// AuthenticationScheme defines the scheme of the authentication method -type AuthenticationScheme byte - -const ( - // BasicAuth is the scheme for Basic HTTP Authentication RFC 7617 - BasicAuth AuthenticationScheme = 1 << iota - // DigestAuth is the scheme for HTTP Digest Access Authentication RFC 7616 - DigestAuth - // BearerAuth is the scheme for OAuth 2.0 Bearer Tokens RFC 6750 - BearerAuth -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme AuthenticationScheme - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -type byScheme []Challenge - -func (bs byScheme) Len() int { return len(bs) } -func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } - -// Sort in priority order: token > digest > basic -func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = <any 8-bit sequence of data> - // CHAR = <any US-ASCII character (octets 0 - 127)> - // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR = <US-ASCII CR, carriage return (13)> - // LF = <US-ASCII LF, linefeed (10)> - // SP = <US-ASCII SP, space (32)> - // HT = <US-ASCII HT, horizontal-tab (9)> - // <"> = <US-ASCII double-quote mark (34)> - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1*<any CHAR except CTLs or separators> - // qdtext = <any TEXT except <">> - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ParseAuthHeader parses challenges from the WWW-Authenticate header -func ParseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - var s AuthenticationScheme - switch v { - case "basic": - s = BasicAuth - case "digest": - s = DigestAuth - case "bearer": - s = BearerAuth - default: - continue - } - challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) - } - sort.Stable(byScheme(challenges)) - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - for { - var pkey string - pkey, s = expectToken(skipSpace(s)) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - return - } - s = s[1:] - } -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case
'\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go deleted file mode 100644 index 67e4aea8da8..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ /dev/null @@ -1,333 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "encoding/base64" - "fmt" - "net/http" - "strings" - "sync" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes/docker/auth" - remoteerrors "github.com/containerd/containerd/remotes/errors" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerAuthorizer struct { - credentials func(string) (string, string, error) - - client *http.Client - header http.Header - mu sync.Mutex - - // indexed by host name - handlers map[string]*authHandler -} - -// NewAuthorizer creates a Docker authorizer using the provided function to -// get credentials for the token server or basic auth. -// Deprecated: Use NewDockerAuthorizer -func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) -} - -type authorizerConfig struct { - credentials func(string) (string, string, error) - client *http.Client - header http.Header -} - -// AuthorizerOpt configures an authorizer -type AuthorizerOpt func(*authorizerConfig) - -// WithAuthClient provides the HTTP client for the authorizer -func WithAuthClient(client *http.Client) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.client = client - } -} - -// WithAuthCreds provides a credential function to the authorizer -func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.credentials = creds - } -} - -// WithAuthHeader provides HTTP headers for authorization -func WithAuthHeader(hdr http.Header) AuthorizerOpt { - return func(opt *authorizerConfig) { - opt.header = hdr - } -} - -// NewDockerAuthorizer creates an authorizer using Docker's registry -// authentication spec. -// See https://docs.docker.com/registry/spec/auth/ -func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { - var ao authorizerConfig - for _, opt := range opts { - opt(&ao) - } - - if ao.client == nil { - ao.client = http.DefaultClient - } - - return &dockerAuthorizer{ - credentials: ao.credentials, - client: ao.client, - header: ao.header, - handlers: make(map[string]*authHandler), - } -} - -// Authorize handles auth request. 
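A sketch of wiring the options above together; the registry host and credentials are placeholders, and attaching the authorizer to a RegistryHost is one typical use:

package main

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// Credentials callback: static values for one host, anonymous elsewhere.
	creds := func(host string) (string, string, error) {
		if host == "registry.example.com" { // hypothetical registry
			return "user", "s3cret", nil
		}
		return "", "", nil
	}

	authorizer := docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthCreds(creds),
	)

	// The authorizer would then be attached to the registry host
	// configuration used by a resolver (e.g. RegistryHost.Authorizer).
	_ = authorizer
}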
-func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - // skip if there is no auth handler - ah := a.getAuthHandler(req.URL.Host) - if ah == nil { - return nil - } - - auth, err := ah.authorize(ctx) - if err != nil { - return err - } - - req.Header.Set("Authorization", auth) - return nil -} - -func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { - a.mu.Lock() - defer a.mu.Unlock() - - return a.handlers[host] -} - -func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { - last := responses[len(responses)-1] - host := last.Request.URL.Host - - a.mu.Lock() - defer a.mu.Unlock() - for _, c := range auth.ParseAuthHeader(last.Header) { - if c.Scheme == auth.BearerAuth { - if err := invalidAuthorization(c, responses); err != nil { - delete(a.handlers, host) - return err - } - - // reuse existing handler - // - // assume that one registry will return the common - // challenge information, including realm and service. - // and the resource scope is only different part - // which can be provided by each request. - if _, ok := a.handlers[host]; ok { - return nil - } - - var username, secret string - if a.credentials != nil { - var err error - username, secret, err = a.credentials(host) - if err != nil { - return err - } - } - - common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) - if err != nil { - return err - } - - a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) - return nil - } else if c.Scheme == auth.BasicAuth && a.credentials != nil { - username, secret, err := a.credentials(host) - if err != nil { - return err - } - - if username != "" && secret != "" { - common := auth.TokenOptions{ - Username: username, - Secret: secret, - } - - a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) - return nil - } - } - } - return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") -} - -// authResult is used to control limit rate. -type authResult struct { - sync.WaitGroup - token string - err error -} - -// authHandler is used to handle auth request per registry server. 
-type authHandler struct { - sync.Mutex - - header http.Header - - client *http.Client - - // only support basic and bearer schemes - scheme auth.AuthenticationScheme - - // common contains common challenge answer - common auth.TokenOptions - - // scopedTokens caches token indexed by scopes, which used in - // bearer auth case - scopedTokens map[string]*authResult -} - -func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { - return &authHandler{ - header: hdr, - client: client, - scheme: scheme, - common: opts, - scopedTokens: map[string]*authResult{}, - } -} - -func (ah *authHandler) authorize(ctx context.Context) (string, error) { - switch ah.scheme { - case auth.BasicAuth: - return ah.doBasicAuth(ctx) - case auth.BearerAuth: - return ah.doBearerAuth(ctx) - default: - return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) - } -} - -func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { - username, secret := ah.common.Username, ah.common.Secret - - if username == "" || secret == "" { - return "", fmt.Errorf("failed to handle basic auth because missing username or secret") - } - - auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) - return fmt.Sprintf("Basic %s", auth), nil -} - -func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) { - // copy common tokenOptions - to := ah.common - - to.Scopes = GetTokenScopes(ctx, to.Scopes) - - // Docs: https://docs.docker.com/registry/spec/auth/scope - scoped := strings.Join(to.Scopes, " ") - - ah.Lock() - if r, exist := ah.scopedTokens[scoped]; exist { - ah.Unlock() - r.Wait() - return r.token, r.err - } - - // only one fetch token job - r := new(authResult) - r.Add(1) - ah.scopedTokens[scoped] = r - ah.Unlock() - - defer func() { - token = fmt.Sprintf("Bearer %s", token) - r.token, r.err = token, err - r.Done() - }() - - // fetch token for the resource scope - if to.Secret != "" { - defer func() { - err = errors.Wrap(err, "failed to fetch oauth token") - }() - // credential information is provided, use oauth POST endpoint - // TODO: Allow setting client_id - resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) - if err != nil { - var errStatus remoteerrors.ErrUnexpectedStatus - if errors.As(err, &errStatus) { - // Registries without support for POST may return 404 for POST /v2/token. - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. 
- if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { - resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) - if err != nil { - return "", err - } - return resp.Token, nil - } - log.G(ctx).WithFields(logrus.Fields{ - "status": errStatus.Status, - "body": string(errStatus.Body), - }).Debugf("token request failed") - } - return "", err - } - return resp.AccessToken, nil - } - // do request anonymously - resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) - if err != nil { - return "", errors.Wrap(err, "failed to fetch anonymous token") - } - return resp.Token, nil -} - -func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { - errStr := c.Parameters["error"] - if errStr == "" { - return nil - } - - n := len(responses) - if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { - return nil - } - - return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) -} - -func sameRequest(r1, r2 *http.Request) bool { - if r1.Method != r2.Method { - return false - } - if *r1.URL != *r2.URL { - return false - } - return true -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/converter.go deleted file mode 100644 index 43e6b372c12..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/converter.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/remotes" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// LegacyConfigMediaType should be replaced by OCI image spec. -// -// More detail: docker/distribution#1622 -const LegacyConfigMediaType = "application/octet-stream" - -// ConvertManifest changes application/octet-stream to schema2 config media type if need. -// -// NOTE: -// 1. original manifest will be deleted by next gc round. -// 2. don't cover manifest list. 
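For context, a hedged sketch of invoking ConvertManifest (whose body follows) against a file-backed content store; the store path and digest are placeholders, and the call only succeeds once the manifest blob actually exists in the store:

package main

import (
	"context"

	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/remotes/docker"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()

	// File-backed content store; the path is a placeholder.
	store, err := local.NewStore("/tmp/content")
	if err != nil {
		panic(err)
	}

	// Descriptor of a previously ingested schema2 manifest (placeholder digest).
	desc := ocispec.Descriptor{
		MediaType: images.MediaTypeDockerSchema2Manifest,
		Digest:    digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000"),
		Size:      1024,
	}

	// Rewrites a legacy application/octet-stream config media type, if any.
	if _, err := docker.ConvertManifest(ctx, store, desc); err != nil {
		panic(err)
	}
}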
-func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { - if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || - desc.MediaType == ocispec.MediaTypeImageManifest) { - - log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) - return desc, nil - } - - // read manifest data - mb, err := content.ReadBlob(ctx, store, desc) - if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data") - } - - var manifest ocispec.Manifest - if err := json.Unmarshal(mb, &manifest); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest") - } - - // check config media type - if manifest.Config.MediaType != LegacyConfigMediaType { - return desc, nil - } - - manifest.Config.MediaType = images.MediaTypeDockerSchema2Config - data, err := json.MarshalIndent(manifest, "", " ") - if err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest") - } - - // update manifest with gc labels - desc.Digest = digest.Canonical.FromBytes(data) - desc.Size = int64(len(data)) - - labels := map[string]string{} - for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() - } - - ref := remotes.MakeRefKey(ctx, desc) - if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content") - } - return desc, nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errcode.go deleted file mode 100644 index 8c623bcbef7..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errcode.go +++ /dev/null @@ -1,283 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. 
-func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returns the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// sets the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human-readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often capitalized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable description of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the error's purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition.
- HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr := daErr.(type) { - case ErrorCode: - err = daErr.WithDetail(nil) - case Error: - err = daErr - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go deleted file mode 100644 index b2bd4d82bd8..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
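A sketch of the JSON envelope behavior implemented above: a registry error payload decodes into Errors, and an entry whose message matches its code collapses back to a bare ErrorCode (the code used here is registered in errdesc.go, deleted below):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

func main() {
	// A typical registry error envelope.
	payload := []byte(`{"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}`)

	var errs docker.Errors
	if err := json.Unmarshal(payload, &errs); err != nil {
		panic(err)
	}
	fmt.Println(errs.Len(), errs.Error()) // 1 unauthorized

	// Round-trips back into the same envelope.
	out, _ := json.Marshal(errs)
	fmt.Println(string(out))
}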
-*/ - -package docker - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go deleted file mode 100644 index 4b2c10e9a37..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
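As an illustration of the Register machinery above, a hypothetical error code added to its own group; the value, message, and group name are invented for the example (Register panics on duplicates, so this is typically done at package init):

package main

import (
	"fmt"
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

// Hypothetical error code registered into a "custom" group.
var errorCodeTooBig = docker.Register("custom", docker.ErrorDescriptor{
	Value:          "MANIFEST_TOO_BIG",
	Message:        "manifest exceeds the size limit",
	Description:    `Returned when a pushed manifest is larger than allowed.`,
	HTTPStatusCode: http.StatusRequestEntityTooLarge,
})

func main() {
	fmt.Println(errorCodeTooBig.Message()) // manifest exceeds the size limit
	fmt.Println(docker.GetGroupNames())    // [custom errcode]
}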
-*/ - -package docker - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type dockerFetcher struct { - *dockerBase -} - -func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) - - hosts := r.filterHosts(HostCapabilityPull) - if len(hosts) == 0 { - return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") - } - - ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) - if err != nil { - return nil, err - } - - return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - // firstly try fetch via external urls - for _, us := range desc.URLs { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) - - u, err := url.Parse(us) - if err != nil { - log.G(ctx).WithError(err).Debug("failed to parse") - continue - } - if u.Scheme != "http" && u.Scheme != "https" { - log.G(ctx).Debug("non-http(s) alternative url is unsupported") - continue - } - log.G(ctx).Debug("trying alternative url") - - // Try this first, parse it - host := RegistryHost{ - Client: http.DefaultClient, - Host: u.Host, - Scheme: u.Scheme, - Path: u.Path, - Capabilities: HostCapabilityPull, - } - req := r.request(host, http.MethodGet) - // Strip namespace from base - req.path = u.Path - if u.RawQuery != "" { - req.path = req.path + "?" + u.RawQuery - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - if errdefs.IsNotFound(err) { - continue // try one of the other urls. 
- } - - return nil, err - } - - return rc, nil - } - - // Try manifests endpoints for manifests types - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - - var firstErr error - for _, host := range r.hosts { - req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) - if err := req.addNamespace(r.refspec.Hostname()); err != nil { - return nil, err - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - // Store the error for referencing later - if firstErr == nil { - firstErr = err - } - continue // try another host - } - - return rc, nil - } - - return nil, firstErr - } - - // Finally use blobs endpoints - var firstErr error - for _, host := range r.hosts { - req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) - if err := req.addNamespace(r.refspec.Hostname()); err != nil { - return nil, err - } - - rc, err := r.open(ctx, req, desc.MediaType, offset) - if err != nil { - // Store the error for referencing later - if firstErr == nil { - firstErr = err - } - continue // try another host - } - - return rc, nil - } - - if errdefs.IsNotFound(firstErr) { - firstErr = errors.Wrapf(errdefs.ErrNotFound, - "could not fetch content descriptor %v (%v) from remote", - desc.Digest, desc.MediaType) - } - - return nil, firstErr - - }) -} - -func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { - req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) - - if offset > 0 { - // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints - // will return the header without supporting the range. The content - // range must always be checked. - req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } - - resp, err := req.doWithRetries(ctx, nil) - if err != nil { - return nil, err - } - defer func() { - if retErr != nil { - resp.Body.Close() - } - }() - - if resp.StatusCode > 299 { - // TODO(stevvooe): When doing a offset specific request, we should - // really distinguish between a 206 and a 200. In the case of 200, we - // can discard the bytes, hiding the seek behavior from the - // implementation. - - if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) - } - var registryErr Errors - if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) - } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) - } - if offset > 0 { - cr := resp.Header.Get("content-range") - if cr != "" { - if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { - return nil, errors.Errorf("unhandled content range in response: %v", cr) - - } - } else { - // TODO: Should any cases where use of content range - // without the proper header be considered? - // 206 responses? 
- - // Discard up to offset - // Could use buffer pool here but this case should be rare - n, err := io.Copy(ioutil.Discard, io.LimitReader(resp.Body, offset)) - if err != nil { - return nil, errors.Wrap(err, "failed to discard to offset") - } - if n != offset { - return nil, errors.Errorf("unable to discard to offset") - } - - } - } - - return resp.Body, nil -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/handler.go deleted file mode 100644 index 529cfbc274b..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/handler.go +++ /dev/null @@ -1,154 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "fmt" - "net/url" - "strings" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/labels" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/reference" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // labelDistributionSource describes the source blob comes from. - labelDistributionSource = "containerd.io/distribution.source" -) - -// AppendDistributionSourceLabel updates the label of blob with distribution source. -func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { - refspec, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - return nil, err - } - - source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - info, err := manager.Info(ctx, desc.Digest) - if err != nil { - return nil, err - } - - key := distributionSourceLabelKey(source) - - originLabel := "" - if info.Labels != nil { - originLabel = info.Labels[key] - } - value := appendDistributionSourceLabel(originLabel, repo) - - // The repo name has been limited under 256 and the distribution - // label might hit the limitation of label size, when blob data - // is used as the very, very common layer. 
-		if err := labels.Validate(key, value); err != nil {
-			log.G(ctx).Warnf("skip to append distribution label: %s", err)
-			return nil, nil
-		}
-
-		info = content.Info{
-			Digest: desc.Digest,
-			Labels: map[string]string{
-				key: value,
-			},
-		}
-		_, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key))
-		return nil, err
-	}, nil
-}
-
-func appendDistributionSourceLabel(originLabel, repo string) string {
-	repos := []string{}
-	if originLabel != "" {
-		repos = strings.Split(originLabel, ",")
-	}
-	repos = append(repos, repo)
-
-	// use empty string to present duplicate items
-	for i := 1; i < len(repos); i++ {
-		tmp, j := repos[i], i-1
-		for ; j >= 0 && repos[j] >= tmp; j-- {
-			if repos[j] == tmp {
-				tmp = ""
-			}
-			repos[j+1] = repos[j]
-		}
-		repos[j+1] = tmp
-	}
-
-	i := 0
-	for ; i < len(repos) && repos[i] == ""; i++ {
-	}
-
-	return strings.Join(repos[i:], ",")
-}
-
-func distributionSourceLabelKey(source string) string {
-	return fmt.Sprintf("%s.%s", labelDistributionSource, source)
-}
-
-// selectRepositoryMountCandidate will select the repo which has longest
-// common prefix components as the candidate.
-func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string {
-	u, err := url.Parse("dummy://" + refspec.Locator)
-	if err != nil {
-		// NOTE: basically, it won't be error here
-		return ""
-	}
-
-	source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
-	repoLabel, ok := sources[distributionSourceLabelKey(source)]
-	if !ok || repoLabel == "" {
-		return ""
-	}
-
-	n, match := 0, ""
-	components := strings.Split(target, "/")
-	for _, repo := range strings.Split(repoLabel, ",") {
-		// the target repo is not a candidate
-		if repo == target {
-			continue
-		}
-
-		if l := commonPrefixComponents(components, repo); l >= n {
-			n, match = l, repo
-		}
-	}
-	return match
-}
-
-func commonPrefixComponents(components []string, target string) int {
-	targetComponents := strings.Split(target, "/")
-
-	i := 0
-	for ; i < len(components) && i < len(targetComponents); i++ {
-		if components[i] != targetComponents[i] {
-			break
-		}
-	}
-	return i
-}
diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
deleted file mode 100644
index 58c866bcdec..00000000000
--- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package docker
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
-	"github.com/pkg/errors"
-)
-
-const maxRetry = 3
-
-type httpReadSeeker struct {
-	size int64
-	offset int64
-	rc io.ReadCloser
-	open func(offset int64) (io.ReadCloser, error)
-	closed bool
-
-	errsWithNoProgress int
-}
-
-func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
-	return &httpReadSeeker{
-		size: size,
-		open: open,
-	}, nil
-}
-
-func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
-	if hrs.closed {
-		return 0, io.EOF
-	}
-
-	rd, err := hrs.reader()
-	if err != nil {
-		return 0, err
-	}
-
-	n, err = rd.Read(p)
-	hrs.offset += int64(n)
-	if n > 0 || err == nil {
-		hrs.errsWithNoProgress = 0
-	}
-	if err == io.ErrUnexpectedEOF {
-		// connection closed unexpectedly. try reconnecting.
-		if n == 0 {
-			hrs.errsWithNoProgress++
-			if hrs.errsWithNoProgress > maxRetry {
-				return // too many retries for this offset with no progress
-			}
-		}
-		if hrs.rc != nil {
-			if clsErr := hrs.rc.Close(); clsErr != nil {
-				log.L.WithError(clsErr).Errorf("httpReadSeeker: failed to close ReadCloser")
-			}
-			hrs.rc = nil
-		}
-		if _, err2 := hrs.reader(); err2 == nil {
-			return n, nil
-		}
-	}
-	return
-}
-
-func (hrs *httpReadSeeker) Close() error {
-	if hrs.closed {
-		return nil
-	}
-	hrs.closed = true
-	if hrs.rc != nil {
-		return hrs.rc.Close()
-	}
-
-	return nil
-}
-
-func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
-	if hrs.closed {
-		return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed")
-	}
-
-	abs := hrs.offset
-	switch whence {
-	case io.SeekStart:
-		abs = offset
-	case io.SeekCurrent:
-		abs += offset
-	case io.SeekEnd:
-		if hrs.size == -1 {
-			return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end")
-		}
-		abs = hrs.size + offset
-	default:
-		return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence")
-	}
-
-	if abs < 0 {
-		return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset")
-	}
-
-	if abs != hrs.offset {
-		if hrs.rc != nil {
-			if err := hrs.rc.Close(); err != nil {
-				log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser")
-			}
-
-			hrs.rc = nil
-		}
-
-		hrs.offset = abs
-	}
-
-	return hrs.offset, nil
-}
-
-func (hrs *httpReadSeeker) reader() (io.Reader, error) {
-	if hrs.rc != nil {
-		return hrs.rc, nil
-	}
-
-	if hrs.size == -1 || hrs.offset < hrs.size {
-		// only try to reopen the body request if we are seeking to a value
-		// less than the actual size.
-		if hrs.open == nil {
-			return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open")
-		}
-
-		rc, err := hrs.open(hrs.offset)
-		if err != nil {
-			return nil, errors.Wrapf(err, "httpReadSeeker: failed open")
-		}
-
-		if hrs.rc != nil {
-			if err := hrs.rc.Close(); err != nil {
-				log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser")
-			}
-		}
-		hrs.rc = rc
-	} else {
-		// There is an edge case here where offset == size of the content. If
-		// we seek, we will probably get an error for content that cannot be
-		// sought (?). In that case, we should err on committing the content,
-		// as the length is already satisfied but we just return the empty
-		// reader instead.
-
-		hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
-	}
-
-	return hrs.rc, nil
-}
diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
deleted file mode 100644
index 97ed66a6ab0..00000000000
--- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package docker
-
-import (
-	"context"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"strings"
-	"time"
-
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/remotes"
-	remoteserrors "github.com/containerd/containerd/remotes/errors"
-	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-type dockerPusher struct {
-	*dockerBase
-	object string
-
-	// TODO: namespace tracker
-	tracker StatusTracker
-}
-
-// Writer implements Ingester API of content store. This allows the client
-// to receive ErrUnavailable when there is already an on-going upload.
-// Note that the tracker MUST implement StatusTrackLocker interface to avoid
-// race condition on StatusTracker.
-func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
-	var wOpts content.WriterOpts
-	for _, opt := range opts {
-		if err := opt(&wOpts); err != nil {
-			return nil, err
-		}
-	}
-	if wOpts.Ref == "" {
-		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
-	}
-	return p.push(ctx, wOpts.Desc, wOpts.Ref, true)
-}
-
-func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
-	return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false)
-}
-
-func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) {
-	if l, ok := p.tracker.(StatusTrackLocker); ok {
-		l.Lock(ref)
-		defer l.Unlock(ref)
-	}
-	ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true)
-	if err != nil {
-		return nil, err
-	}
-	status, err := p.tracker.GetStatus(ref)
-	if err == nil {
-		if status.Committed && status.Offset == status.Total {
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref)
-		}
-		if unavailableOnFail {
-			// Another push of this ref is happening elsewhere. The rest of function
-			// will continue only when `errdefs.IsNotFound(err) == true` (i.e. there
-			// is no actively-tracked ref already).
-			return nil, errors.Wrap(errdefs.ErrUnavailable, "push is on-going")
-		}
-		// TODO: Handle incomplete status
-	} else if !errdefs.IsNotFound(err) {
-		return nil, errors.Wrap(err, "failed to get status")
-	}
-
-	hosts := p.filterHosts(HostCapabilityPush)
-	if len(hosts) == 0 {
-		return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts")
-	}
-
-	var (
-		isManifest bool
-		existCheck []string
-		host = hosts[0]
-	)
-
-	switch desc.MediaType {
-	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
-		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
-		isManifest = true
-		existCheck = getManifestPath(p.object, desc.Digest)
-	default:
-		existCheck = []string{"blobs", desc.Digest.String()}
-	}
-
-	req := p.request(host, http.MethodHead, existCheck...)
-	req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", "))
-
-	log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")
-
-	resp, err := req.doWithRetries(ctx, nil)
-	if err != nil {
-		if !errors.Is(err, ErrInvalidAuthorization) {
-			return nil, err
-		}
-		log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
-	} else {
-		if resp.StatusCode == http.StatusOK {
-			var exists bool
-			if isManifest && existCheck[1] != desc.Digest.String() {
-				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
-				if dgstHeader == desc.Digest {
-					exists = true
-				}
-			} else {
-				exists = true
-			}
-
-			if exists {
-				p.tracker.SetStatus(ref, Status{
-					Committed: true,
-					Status: content.Status{
-						Ref: ref,
-						Total: desc.Size,
-						Offset: desc.Size,
-						// TODO: Set updated time?
-					},
-				})
-				resp.Body.Close()
-				return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
-			}
-		} else if resp.StatusCode != http.StatusNotFound {
-			err := remoteserrors.NewUnexpectedStatusErr(resp)
-			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
-			resp.Body.Close()
-			return nil, err
-		}
-		resp.Body.Close()
-	}
-
-	if isManifest {
-		putPath := getManifestPath(p.object, desc.Digest)
-		req = p.request(host, http.MethodPut, putPath...)
-		req.header.Add("Content-Type", desc.MediaType)
-	} else {
-		// Start upload request
-		req = p.request(host, http.MethodPost, "blobs", "uploads/")
-
-		var resp *http.Response
-		if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
-			preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
-			pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo)
-
-			// NOTE: the fromRepo might be private repo and
-			// auth service still can grant token without error.
-			// but the post request will fail because of 401.
-			//
-			// for the private repo, we should remove mount-from
-			// query and send the request again.
-			resp, err = preq.doWithRetries(pctx, nil)
-			if err != nil {
-				return nil, err
-			}
-
-			if resp.StatusCode == http.StatusUnauthorized {
-				log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
-
-				resp.Body.Close()
-				resp = nil
-			}
-		}
-
-		if resp == nil {
-			resp, err = req.doWithRetries(ctx, nil)
-			if err != nil {
-				return nil, err
-			}
-		}
-		defer resp.Body.Close()
-
-		switch resp.StatusCode {
-		case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
-		case http.StatusCreated:
-			p.tracker.SetStatus(ref, Status{
-				Committed: true,
-				Status: content.Status{
-					Ref: ref,
-					Total: desc.Size,
-					Offset: desc.Size,
-				},
-			})
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
-		default:
-			err := remoteserrors.NewUnexpectedStatusErr(resp)
-			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
-			return nil, err
-		}
-
-		var (
-			location = resp.Header.Get("Location")
-			lurl *url.URL
-			lhost = host
-		)
-		// Support paths without host in location
-		if strings.HasPrefix(location, "/") {
-			lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
-			if err != nil {
-				return nil, errors.Wrapf(err, "unable to parse location %v", location)
-			}
-		} else {
-			if !strings.Contains(location, "://") {
-				location = lhost.Scheme + "://" + location
-			}
-			lurl, err = url.Parse(location)
-			if err != nil {
-				return nil, errors.Wrapf(err, "unable to parse location %v", location)
-			}
-
-			if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
-
-				lhost.Scheme = lurl.Scheme
-				lhost.Host = lurl.Host
-				log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination")
-
-				// Strip authorizer if change to host or scheme
-				lhost.Authorizer = nil
-			}
-		}
-		q := lurl.Query()
-		q.Add("digest", desc.Digest.String())
-
-		req = p.request(lhost, http.MethodPut)
-		req.header.Set("Content-Type", "application/octet-stream")
-		req.path = lurl.Path + "?" + q.Encode()
-	}
-	p.tracker.SetStatus(ref, Status{
-		Status: content.Status{
-			Ref: ref,
-			Total: desc.Size,
-			Expected: desc.Digest,
-			StartedAt: time.Now(),
-		},
-	})
-
-	// TODO: Support chunked upload
-
-	pr, pw := io.Pipe()
-	respC := make(chan response, 1)
-	body := ioutil.NopCloser(pr)
-
-	req.body = func() (io.ReadCloser, error) {
-		if body == nil {
-			return nil, errors.New("cannot reuse body, request must be retried")
-		}
-		// Only use the body once since pipe cannot be seeked
-		ob := body
-		body = nil
-		return ob, nil
-	}
-	req.size = desc.Size
-
-	go func() {
-		defer close(respC)
-		resp, err := req.doWithRetries(ctx, nil)
-		if err != nil {
-			respC <- response{err: err}
-			pr.CloseWithError(err)
-			return
-		}
-
-		switch resp.StatusCode {
-		case http.StatusOK, http.StatusCreated, http.StatusNoContent:
-		default:
-			err := remoteserrors.NewUnexpectedStatusErr(resp)
-			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
-			pr.CloseWithError(err)
-		}
-		respC <- response{Response: resp}
-	}()
-
-	return &pushWriter{
-		base: p.dockerBase,
-		ref: ref,
-		pipe: pw,
-		responseC: respC,
-		isManifest: isManifest,
-		expected: desc.Digest,
-		tracker: p.tracker,
-	}, nil
-}
-
-func getManifestPath(object string, dgst digest.Digest) []string {
-	if i := strings.IndexByte(object, '@'); i >= 0 {
-		if object[i+1:] != dgst.String() {
-			// use digest, not tag
-			object = ""
-		} else {
-			// strip @ for registry path to make tag
-			object = object[:i]
-		}
-
-	}
-
-	if object == "" {
-		return []string{"manifests", dgst.String()}
-	}
-
-	return []string{"manifests", object}
-}
-
-type response struct {
-	*http.Response
-	err error
-}
-
-type pushWriter struct {
-	base *dockerBase
-	ref string
-
-	pipe *io.PipeWriter
-	responseC <-chan response
-	isManifest bool
-
-	expected digest.Digest
-	tracker StatusTracker
-}
-
-func (pw *pushWriter) Write(p []byte) (n int, err error) {
-	status, err := pw.tracker.GetStatus(pw.ref)
-	if err != nil {
-		return n, err
-	}
-	n, err = pw.pipe.Write(p)
-	status.Offset += int64(n)
-	status.UpdatedAt = time.Now()
-	pw.tracker.SetStatus(pw.ref, status)
-	return
-}
-
-func (pw *pushWriter) Close() error {
-	return pw.pipe.Close()
-}
-
-func (pw *pushWriter) Status() (content.Status, error) {
-	status, err := pw.tracker.GetStatus(pw.ref)
-	if err != nil {
-		return content.Status{}, err
-	}
-	return status.Status, nil
-
-}
-
-func (pw *pushWriter) Digest() digest.Digest {
-	// TODO: Get rid of this function?
-	return pw.expected
-}
-
-func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
-	// Check whether read has already thrown an error
-	if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
-		return errors.Wrap(err, "pipe error before commit")
-	}
-
-	if err := pw.pipe.Close(); err != nil {
-		return err
-	}
-	// TODO: timeout waiting for response
-	resp := <-pw.responseC
-	if resp.err != nil {
-		return resp.err
-	}
-	defer resp.Response.Body.Close()
-
-	// 201 is specified return status, some registries return
-	// 200, 202 or 204.
-	switch resp.StatusCode {
-	case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
-	default:
-		return remoteserrors.NewUnexpectedStatusErr(resp.Response)
-	}
-
-	status, err := pw.tracker.GetStatus(pw.ref)
-	if err != nil {
-		return errors.Wrap(err, "failed to get status")
-	}
-
-	if size > 0 && size != status.Offset {
-		return errors.Errorf("unexpected size %d, expected %d", status.Offset, size)
-	}
-
-	if expected == "" {
-		expected = status.Expected
-	}
-
-	actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
-	if err != nil {
-		return errors.Wrap(err, "invalid content digest in response")
-	}
-
-	if actual != expected {
-		return errors.Errorf("got digest %s, expected %s", actual, expected)
-	}
-
-	status.Committed = true
-	status.UpdatedAt = time.Now()
-	pw.tracker.SetStatus(pw.ref, status)
-
-	return nil
-}
-
-func (pw *pushWriter) Truncate(size int64) error {
-	// TODO: if blob close request and start new request at offset
-	// TODO: always error on manifest
-	return errors.New("cannot truncate remote upload")
-}
-
-func requestWithMountFrom(req *request, mount, from string) *request {
-	creq := *req
-
-	sep := "?"
-	if strings.Contains(creq.path, sep) {
-		sep = "&"
-	}
-
-	creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
-
-	return &creq
-}
diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/registry.go
deleted file mode 100644
index 1e77d4c86cd..00000000000
--- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/registry.go
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package docker
-
-import (
-	"net"
-	"net/http"
-
-	"github.com/pkg/errors"
-)
-
-// HostCapabilities represent the capabilities of the registry
-// host. This also represents the set of operations for which
-// the registry host may be trusted to perform.
-//
-// For example pushing is a capability which should only be
-// performed on an upstream source, not a mirror.
-// Resolving (the process of converting a name into a digest)
-// must be considered a trusted operation and only done by
-// a host which is trusted (or more preferably by secure process
-// which can prove the provenance of the mapping). A public
-// mirror should never be trusted to do a resolve action.
-//
-// | Registry Type    | Pull | Resolve | Push |
-// |------------------|------|---------|------|
-// | Public Registry  | yes  | yes     | yes  |
-// | Private Registry | yes  | yes     | yes  |
-// | Public Mirror    | yes  | no      | no   |
-// | Private Mirror   | yes  | yes     | no   |
-type HostCapabilities uint8
-
-const (
-	// HostCapabilityPull represents the capability to fetch manifests
-	// and blobs by digest
-	HostCapabilityPull HostCapabilities = 1 << iota
-
-	// HostCapabilityResolve represents the capability to fetch manifests
-	// by name
-	HostCapabilityResolve
-
-	// HostCapabilityPush represents the capability to push blobs and
-	// manifests
-	HostCapabilityPush
-
-	// Reserved for future capabilities (i.e. search, catalog, remove)
-)
-
-// Has checks whether the capabilities list has the provide capability
-func (c HostCapabilities) Has(t HostCapabilities) bool {
-	return c&t == t
-}
-
-// RegistryHost represents a complete configuration for a registry
-// host, representing the capabilities, authorizations, connection
-// configuration, and location.
-type RegistryHost struct {
-	Client *http.Client
-	Authorizer Authorizer
-	Host string
-	Scheme string
-	Path string
-	Capabilities HostCapabilities
-	Header http.Header
-}
-
-func (h RegistryHost) isProxy(refhost string) bool {
-	if refhost != h.Host {
-		if refhost != "docker.io" || h.Host != "registry-1.docker.io" {
-			return true
-		}
-	}
-	return false
-}
-
-// RegistryHosts fetches the registry hosts for a given namespace,
-// provided by the host component of an distribution image reference.
-type RegistryHosts func(string) ([]RegistryHost, error)
-
-// Registries joins multiple registry configuration functions, using the same
-// order as provided within the arguments. When an empty registry configuration
-// is returned with a nil error, the next function will be called.
-// NOTE: This function will not join configurations, as soon as a non-empty
-// configuration is returned from a configuration function, it will be returned
-// to the caller.
-func Registries(registries ...RegistryHosts) RegistryHosts {
-	return func(host string) ([]RegistryHost, error) {
-		for _, registry := range registries {
-			config, err := registry(host)
-			if err != nil {
-				return config, err
-			}
-			if len(config) > 0 {
-				return config, nil
-			}
-		}
-		return nil, nil
-	}
-}
-
-type registryOpts struct {
-	authorizer Authorizer
-	plainHTTP func(string) (bool, error)
-	host func(string) (string, error)
-	client *http.Client
-}
-
-// RegistryOpt defines a registry default option
-type RegistryOpt func(*registryOpts)
-
-// WithPlainHTTP configures registries to use plaintext http scheme
-// for the provided host match function.
-func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
-	return func(opts *registryOpts) {
-		opts.plainHTTP = f
-	}
-}
-
-// WithAuthorizer configures the default authorizer for a registry
-func WithAuthorizer(a Authorizer) RegistryOpt {
-	return func(opts *registryOpts) {
-		opts.authorizer = a
-	}
-}
-
-// WithHostTranslator defines the default translator to use for registry hosts
-func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
-	return func(opts *registryOpts) {
-		opts.host = h
-	}
-}
-
-// WithClient configures the default http client for a registry
-func WithClient(c *http.Client) RegistryOpt {
-	return func(opts *registryOpts) {
-		opts.client = c
-	}
-}
-
-// ConfigureDefaultRegistries is used to create a default configuration for
-// registries. For more advanced configurations or per-domain setups,
-// the RegistryHosts interface should be used directly.
-// NOTE: This function will always return a non-empty value or error
-func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
-	var opts registryOpts
-	for _, opt := range ropts {
-		opt(&opts)
-	}
-
-	return func(host string) ([]RegistryHost, error) {
-		config := RegistryHost{
-			Client: opts.client,
-			Authorizer: opts.authorizer,
-			Host: host,
-			Scheme: "https",
-			Path: "/v2",
-			Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
-		}
-
-		if config.Client == nil {
-			config.Client = http.DefaultClient
-		}
-
-		if opts.plainHTTP != nil {
-			match, err := opts.plainHTTP(host)
-			if err != nil {
-				return nil, err
-			}
-			if match {
-				config.Scheme = "http"
-			}
-		}
-
-		if opts.host != nil {
-			var err error
-			config.Host, err = opts.host(config.Host)
-			if err != nil {
-				return nil, err
-			}
-		} else if host == "docker.io" {
-			config.Host = "registry-1.docker.io"
-		}
-
-		return []RegistryHost{config}, nil
-	}
-}
-
-// MatchAllHosts is a host match function which is always true.
-func MatchAllHosts(string) (bool, error) {
-	return true, nil
-}
-
-// MatchLocalhost is a host match function which returns true for
-// localhost.
-//
-// Note: this does not handle matching of ip addresses in octal,
-// decimal or hex form.
-func MatchLocalhost(host string) (bool, error) {
-	switch {
-	case host == "::1":
-		return true, nil
-	case host == "[::1]":
-		return true, nil
-	}
-	h, p, err := net.SplitHostPort(host)
-
-	// addrError helps distinguish between errors of form
-	// "no colon in address" and "too many colons in address".
-	// The former is fine as the host string need not have a
-	// port. Latter needs to be handled.
-	addrError := &net.AddrError{
-		Err: "missing port in address",
-		Addr: host,
-	}
-	if err != nil {
-		if err.Error() != addrError.Error() {
-			return false, err
-		}
-		// host string without any port specified
-		h = host
-	} else if len(p) == 0 {
-		return false, errors.New("invalid host name format")
-	}
-
-	// use ipv4 dotted decimal for further checking
-	if h == "localhost" {
-		h = "127.0.0.1"
-	}
-	ip := net.ParseIP(h)
-
-	return ip.IsLoopback(), nil
-}
diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
deleted file mode 100644
index 1be9e1d05c6..00000000000
--- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
+++ /dev/null
@@ -1,667 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package docker
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"path"
-	"strings"
-
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/reference"
-	"github.com/containerd/containerd/remotes"
-	"github.com/containerd/containerd/remotes/docker/schema1"
-	"github.com/containerd/containerd/version"
-	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context/ctxhttp"
-)
-
-var (
-	// ErrInvalidAuthorization is used when credentials are passed to a server but
-	// those credentials are rejected.
-	ErrInvalidAuthorization = errors.New("authorization failed")
-
-	// MaxManifestSize represents the largest size accepted from a registry
-	// during resolution. Larger manifests may be accepted using a
-	// resolution method other than the registry.
-	//
-	// NOTE: The max supported layers by some runtimes is 128 and individual
-	// layers will not contribute more than 256 bytes, making a
-	// reasonable limit for a large image manifests of 32K bytes.
-	// 4M bytes represents a much larger upper bound for images which may
-	// contain large annotations or be non-images. A proper manifest
-	// design puts large metadata in subobjects, as is consistent the
-	// intent of the manifest design.
-	MaxManifestSize int64 = 4 * 1048 * 1048
-)
-
-// Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
-// An Authorizer is responsible for caching tokens or credentials used by
-// requests.
-type Authorizer interface {
-	// Authorize sets the appropriate `Authorization` header on the given
-	// request.
-	//
-	// If no authorization is found for the request, the request remains
-	// unmodified. It may also add an `Authorization` header as
-	//  "bearer <some bearer token>"
-	//  "basic <base64 encoded credentials>"
-	Authorize(context.Context, *http.Request) error
-
-	// AddResponses adds a 401 response for the authorizer to consider when
-	// authorizing requests. The last response should be unauthorized and
-	// the previous requests are used to consider redirects and retries
-	// that may have led to the 401.
-	//
-	// If response is not handled, returns `ErrNotImplemented`
-	AddResponses(context.Context, []*http.Response) error
-}
-
-// ResolverOptions are used to configured a new Docker register resolver
-type ResolverOptions struct {
-	// Hosts returns registry host configurations for a namespace.
-	Hosts RegistryHosts
-
-	// Headers are the HTTP request header fields sent by the resolver
-	Headers http.Header
-
-	// Tracker is used to track uploads to the registry. This is used
-	// since the registry does not have upload tracking and the existing
-	// mechanism for getting blob upload status is expensive.
-	Tracker StatusTracker
-
-	// Authorizer is used to authorize registry requests
-	// Deprecated: use Hosts
-	Authorizer Authorizer
-
-	// Credentials provides username and secret given a host.
-	// If username is empty but a secret is given, that secret
-	// is interpreted as a long lived token.
-	// Deprecated: use Hosts
-	Credentials func(string) (string, string, error)
-
-	// Host provides the hostname given a namespace.
-	// Deprecated: use Hosts
-	Host func(string) (string, error)
-
-	// PlainHTTP specifies to use plain http and not https
-	// Deprecated: use Hosts
-	PlainHTTP bool
-
-	// Client is the http client to used when making registry requests
-	// Deprecated: use Hosts
-	Client *http.Client
-}
-
-// DefaultHost is the default host function.
-func DefaultHost(ns string) (string, error) {
-	if ns == "docker.io" {
-		return "registry-1.docker.io", nil
-	}
-	return ns, nil
-}
-
-type dockerResolver struct {
-	hosts RegistryHosts
-	header http.Header
-	resolveHeader http.Header
-	tracker StatusTracker
-}
-
-// NewResolver returns a new resolver to a Docker registry
-func NewResolver(options ResolverOptions) remotes.Resolver {
-	if options.Tracker == nil {
-		options.Tracker = NewInMemoryTracker()
-	}
-
-	if options.Headers == nil {
-		options.Headers = make(http.Header)
-	}
-	if _, ok := options.Headers["User-Agent"]; !ok {
-		options.Headers.Set("User-Agent", "containerd/"+version.Version)
-	}
-
-	resolveHeader := http.Header{}
-	if _, ok := options.Headers["Accept"]; !ok {
-		// set headers for all the types we support for resolution.
-		resolveHeader.Set("Accept", strings.Join([]string{
-			images.MediaTypeDockerSchema2Manifest,
-			images.MediaTypeDockerSchema2ManifestList,
-			ocispec.MediaTypeImageManifest,
-			ocispec.MediaTypeImageIndex, "*/*"}, ", "))
-	} else {
-		resolveHeader["Accept"] = options.Headers["Accept"]
-		delete(options.Headers, "Accept")
-	}
-
-	if options.Hosts == nil {
-		opts := []RegistryOpt{}
-		if options.Host != nil {
-			opts = append(opts, WithHostTranslator(options.Host))
-		}
-
-		if options.Authorizer == nil {
-			options.Authorizer = NewDockerAuthorizer(
-				WithAuthClient(options.Client),
-				WithAuthHeader(options.Headers),
-				WithAuthCreds(options.Credentials))
-		}
-		opts = append(opts, WithAuthorizer(options.Authorizer))
-
-		if options.Client != nil {
-			opts = append(opts, WithClient(options.Client))
-		}
-		if options.PlainHTTP {
-			opts = append(opts, WithPlainHTTP(MatchAllHosts))
-		} else {
-			opts = append(opts, WithPlainHTTP(MatchLocalhost))
-		}
-		options.Hosts = ConfigureDefaultRegistries(opts...)
-	}
-	return &dockerResolver{
-		hosts: options.Hosts,
-		header: options.Headers,
-		resolveHeader: resolveHeader,
-		tracker: options.Tracker,
-	}
-}
-
-func getManifestMediaType(resp *http.Response) string {
-	// Strip encoding data (manifests should always be ascii JSON)
-	contentType := resp.Header.Get("Content-Type")
-	if sp := strings.IndexByte(contentType, ';'); sp != -1 {
-		contentType = contentType[0:sp]
-	}
-
-	// As of Apr 30 2019 the registry.access.redhat.com registry does not specify
-	// the content type of any data but uses schema1 manifests.
-	if contentType == "text/plain" {
-		contentType = images.MediaTypeDockerSchema1Manifest
-	}
-	return contentType
-}
-
-type countingReader struct {
-	reader io.Reader
-	bytesRead int64
-}
-
-func (r *countingReader) Read(p []byte) (int, error) {
-	n, err := r.reader.Read(p)
-	r.bytesRead += int64(n)
-	return n, err
-}
-
-var _ remotes.Resolver = &dockerResolver{}
-
-func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
-	base, err := r.resolveDockerBase(ref)
-	if err != nil {
-		return "", ocispec.Descriptor{}, err
-	}
-	refspec := base.refspec
-	if refspec.Object == "" {
-		return "", ocispec.Descriptor{}, reference.ErrObjectRequired
-	}
-
-	var (
-		firstErr error
-		paths [][]string
-		dgst = refspec.Digest()
-		caps = HostCapabilityPull
-	)
-
-	if dgst != "" {
-		if err := dgst.Validate(); err != nil {
-			// need to fail here, since we can't actually resolve the invalid
-			// digest.
-			return "", ocispec.Descriptor{}, err
-		}
-
-		// turns out, we have a valid digest, make a url.
-		paths = append(paths, []string{"manifests", dgst.String()})
-
-		// fallback to blobs on not found.
-		paths = append(paths, []string{"blobs", dgst.String()})
-	} else {
-		// Add
-		paths = append(paths, []string{"manifests", refspec.Object})
-		caps |= HostCapabilityResolve
-	}
-
-	hosts := base.filterHosts(caps)
-	if len(hosts) == 0 {
-		return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts")
-	}
-
-	ctx, err = ContextWithRepositoryScope(ctx, refspec, false)
-	if err != nil {
-		return "", ocispec.Descriptor{}, err
-	}
-
-	for _, u := range paths {
-		for _, host := range hosts {
-			ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
-
-			req := base.request(host, http.MethodHead, u...)
-			if err := req.addNamespace(base.refspec.Hostname()); err != nil {
-				return "", ocispec.Descriptor{}, err
-			}
-
-			for key, value := range r.resolveHeader {
-				req.header[key] = append(req.header[key], value...)
-			}
-
-			log.G(ctx).Debug("resolving")
-			resp, err := req.doWithRetries(ctx, nil)
-			if err != nil {
-				if errors.Is(err, ErrInvalidAuthorization) {
-					err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
-				}
-				// Store the error for referencing later
-				if firstErr == nil {
-					firstErr = err
-				}
-				log.G(ctx).WithError(err).Info("trying next host")
-				continue // try another host
-			}
-			resp.Body.Close() // don't care about body contents.
-
-			if resp.StatusCode > 299 {
-				if resp.StatusCode == http.StatusNotFound {
-					log.G(ctx).Info("trying next host - response was http.StatusNotFound")
-					continue
-				}
-				if resp.StatusCode > 399 {
-					// Set firstErr when encountering the first non-404 status code.
-					if firstErr == nil {
-						firstErr = errors.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status)
-					}
-					continue // try another host
-				}
-				return "", ocispec.Descriptor{}, errors.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status)
-			}
-			size := resp.ContentLength
-			contentType := getManifestMediaType(resp)
-
-			// if no digest was provided, then only a resolve
-			// trusted registry was contacted, in this case use
-			// the digest header (or content from GET)
-			if dgst == "" {
-				// this is the only point at which we trust the registry. we use the
-				// content headers to assemble a descriptor for the name. when this becomes
-				// more robust, we mostly get this information from a secure trust store.
-				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
-
-				if dgstHeader != "" && size != -1 {
-					if err := dgstHeader.Validate(); err != nil {
-						return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
-					}
-					dgst = dgstHeader
-				}
-			}
-			if dgst == "" || size == -1 {
-				log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
-
-				req = base.request(host, http.MethodGet, u...)
-				if err := req.addNamespace(base.refspec.Hostname()); err != nil {
-					return "", ocispec.Descriptor{}, err
-				}
-
-				for key, value := range r.resolveHeader {
-					req.header[key] = append(req.header[key], value...)
-				}
-
-				resp, err := req.doWithRetries(ctx, nil)
-				if err != nil {
-					return "", ocispec.Descriptor{}, err
-				}
-				defer resp.Body.Close()
-
-				bodyReader := countingReader{reader: resp.Body}
-
-				contentType = getManifestMediaType(resp)
-				if dgst == "" {
-					if contentType == images.MediaTypeDockerSchema1Manifest {
-						b, err := schema1.ReadStripSignature(&bodyReader)
-						if err != nil {
-							return "", ocispec.Descriptor{}, err
-						}
-
-						dgst = digest.FromBytes(b)
-					} else {
-						dgst, err = digest.FromReader(&bodyReader)
-						if err != nil {
-							return "", ocispec.Descriptor{}, err
-						}
-					}
-				} else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil {
-					return "", ocispec.Descriptor{}, err
-				}
-				size = bodyReader.bytesRead
-			}
-			// Prevent resolving to excessively large manifests
-			if size > MaxManifestSize {
-				if firstErr == nil {
-					firstErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref)
-				}
-				continue
-			}
-
-			desc := ocispec.Descriptor{
-				Digest: dgst,
-				MediaType: contentType,
-				Size: size,
-			}
-
-			log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
-			return ref, desc, nil
-		}
-	}
-
-	// If above loop terminates without return, then there was an error.
-	// "firstErr" contains the first non-404 error. That is, "firstErr == nil"
-	// means that either no registries were given or each registry returned 404.
-
-	if firstErr == nil {
-		firstErr = errors.Wrap(errdefs.ErrNotFound, ref)
-	}
-
-	return "", ocispec.Descriptor{}, firstErr
-}
-
-func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
-	base, err := r.resolveDockerBase(ref)
-	if err != nil {
-		return nil, err
-	}
-
-	return dockerFetcher{
-		dockerBase: base,
-	}, nil
-}
-
-func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
-	base, err := r.resolveDockerBase(ref)
-	if err != nil {
-		return nil, err
-	}
-
-	return dockerPusher{
-		dockerBase: base,
-		object: base.refspec.Object,
-		tracker: r.tracker,
-	}, nil
-}
-
-func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) {
-	refspec, err := reference.Parse(ref)
-	if err != nil {
-		return nil, err
-	}
-
-	return r.base(refspec)
-}
-
-type dockerBase struct {
-	refspec reference.Spec
-	repository string
-	hosts []RegistryHost
-	header http.Header
-}
-
-func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
-	host := refspec.Hostname()
-	hosts, err := r.hosts(host)
-	if err != nil {
-		return nil, err
-	}
-	return &dockerBase{
-		refspec: refspec,
-		repository: strings.TrimPrefix(refspec.Locator, host+"/"),
-		hosts: hosts,
-		header: r.header,
-	}, nil
-}
-
-func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) {
-	for _, host := range r.hosts {
-		if host.Capabilities.Has(caps) {
-			hosts = append(hosts, host)
-		}
-	}
-	return
-}
-
-func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request {
-	header := r.header.Clone()
-	if header == nil {
-		header = http.Header{}
-	}
-
-	for key, value := range host.Header {
-		header[key] = append(header[key], value...)
-	}
-	parts := append([]string{"/", host.Path, r.repository}, ps...)
-	p := path.Join(parts...)
-	// Join strips trailing slash, re-add ending "/" if included
-	if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") {
-		p = p + "/"
-	}
-	return &request{
-		method: method,
-		path: p,
-		header: header,
-		host: host,
-	}
-}
-
-func (r *request) authorize(ctx context.Context, req *http.Request) error {
-	// Check if has header for host
-	if r.host.Authorizer != nil {
-		if err := r.host.Authorizer.Authorize(ctx, req); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *request) addNamespace(ns string) (err error) {
-	if !r.host.isProxy(ns) {
-		return nil
-	}
-	var q url.Values
-	// Parse query
-	if i := strings.IndexByte(r.path, '?'); i > 0 {
-		r.path = r.path[:i+1]
-		q, err = url.ParseQuery(r.path[i+1:])
-		if err != nil {
-			return
-		}
-	} else {
-		r.path = r.path + "?"
-		q = url.Values{}
-	}
-	q.Add("ns", ns)
-
-	r.path = r.path + q.Encode()
-
-	return
-}
-
-type request struct {
-	method string
-	path string
-	header http.Header
-	host RegistryHost
-	body func() (io.ReadCloser, error)
-	size int64
-}
-
-func (r *request) do(ctx context.Context) (*http.Response, error) {
-	u := r.host.Scheme + "://" + r.host.Host + r.path
-	req, err := http.NewRequest(r.method, u, nil)
-	if err != nil {
-		return nil, err
-	}
-	req.Header = http.Header{} // headers need to be copied to avoid concurrent map access
-	for k, v := range r.header {
-		req.Header[k] = v
-	}
-	if r.body != nil {
-		body, err := r.body()
-		if err != nil {
-			return nil, err
-		}
-		req.Body = body
-		req.GetBody = r.body
-		if r.size > 0 {
-			req.ContentLength = r.size
-		}
-	}
-
-	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
-	log.G(ctx).WithFields(requestFields(req)).Debug("do request")
-	if err := r.authorize(ctx, req); err != nil {
-		return nil, errors.Wrap(err, "failed to authorize")
-	}
-
-	var client = &http.Client{}
-	if r.host.Client != nil {
-		*client = *r.host.Client
-	}
-	if client.CheckRedirect == nil {
-		client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
-			if len(via) >= 10 {
-				return errors.New("stopped after 10 redirects")
-			}
-			return errors.Wrap(r.authorize(ctx, req), "failed to authorize redirect")
-		}
-	}
-
-	resp, err := ctxhttp.Do(ctx, client, req)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to do request")
-	}
-	log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
-	return resp, nil
-}
-
-func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
-	resp, err := r.do(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	responses = append(responses, resp)
-	retry, err := r.retryRequest(ctx, responses)
-	if err != nil {
-		resp.Body.Close()
-		return nil, err
-	}
-	if retry {
-		resp.Body.Close()
-		return r.doWithRetries(ctx, responses)
-	}
-	return resp, err
-}
-
-func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
-	if len(responses) > 5 {
-		return false, nil
-	}
-	last := responses[len(responses)-1]
-	switch last.StatusCode {
-	case http.StatusUnauthorized:
-		log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
-		if r.host.Authorizer != nil {
-			if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil {
-				return true, nil
-			} else if !errdefs.IsNotImplemented(err) {
-				return false, err
-			}
-		}
-
-		return false, nil
-	case http.StatusMethodNotAllowed:
-		// Support registries which have not properly implemented the HEAD method for
-		// manifests endpoint
-		if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") {
-			r.method = http.MethodGet
-			return true, nil
-		}
-	case http.StatusRequestTimeout, http.StatusTooManyRequests:
-		return true, nil
-	}
-
-	// TODO: Handle 50x errors accounting for attempt history
-	return false, nil
-}
-
-func (r *request) String() string {
-	return r.host.Scheme + "://" + r.host.Host + r.path
-}
-
-func requestFields(req *http.Request) logrus.Fields {
-	fields := map[string]interface{}{
-		"request.method": req.Method,
-	}
-	for k, vals := range req.Header {
-		k = strings.ToLower(k)
-		if k == "authorization" {
-			continue
-		}
-		for i, v := range vals {
-			field := "request.header." + k
-			if i > 0 {
-				field = fmt.Sprintf("%s.%d", field, i)
-			}
-			fields[field] = v
-		}
-	}
-
-	return logrus.Fields(fields)
-}
-
-func responseFields(resp *http.Response) logrus.Fields {
-	fields := map[string]interface{}{
-		"response.status": resp.Status,
-	}
-	for k, vals := range resp.Header {
-		k = strings.ToLower(k)
-		for i, v := range vals {
-			field := "response.header." + k
-			if i > 0 {
-				field = fmt.Sprintf("%s.%d", field, i)
-			}
-			fields[field] = v
-		}
-	}
-
-	return logrus.Fields(fields)
}
-
-	return logrus.Fields(fields)
-}
diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
deleted file mode 100644
index f15a9acf3e8..00000000000
--- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
+++ /dev/null
@@ -1,606 +0,0 @@
-/*
-   Copyright The containerd Authors.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-package schema1
-
-import (
-	"bytes"
-	"context"
-	"encoding/base64"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"golang.org/x/sync/errgroup"
-
-	"github.com/containerd/containerd/archive/compression"
-	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/log"
-	"github.com/containerd/containerd/remotes"
-	digest "github.com/opencontainers/go-digest"
-	specs "github.com/opencontainers/image-spec/specs-go"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-)
-
-const (
-	manifestSizeLimit = 8e6 // 8MB
-	labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
-)
-
-type blobState struct {
-	diffID digest.Digest
-	empty bool
-}
-
-// Converter converts schema1 manifests to schema2 on fetch
-type Converter struct {
-	contentStore content.Store
-	fetcher remotes.Fetcher
-
-	pulledManifest *manifest
-
-	mu sync.Mutex
-	blobMap map[digest.Digest]blobState
-	layerBlobs map[digest.Digest]ocispec.Descriptor
-}
-
-// NewConverter returns a new converter
-func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
-	return &Converter{
-		contentStore: contentStore,
-		fetcher: fetcher,
-		blobMap: map[digest.Digest]blobState{},
-		layerBlobs: map[digest.Digest]ocispec.Descriptor{},
-	}
-}
-
-// Handle fetching descriptors for a docker media type
-func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
-	switch desc.MediaType {
-	case images.MediaTypeDockerSchema1Manifest:
-		if err := c.fetchManifest(ctx, desc); err != nil {
-			return nil, err
-		}
-
-		m := c.pulledManifest
-		if len(m.FSLayers) != len(m.History) {
-			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
-		}
-		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
-
-		for i := range m.FSLayers {
-			if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
-				empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
-				if err != nil {
-					return nil, err
-				}
-
-				// Do no attempt to download a known empty blob
-				if !empty {
-					descs = append([]ocispec.Descriptor{
-						{
-							MediaType: images.MediaTypeDockerSchema2LayerGzip,
-							Digest: c.pulledManifest.FSLayers[i].BlobSum,
-							Size: -1,
-						},
-					}, descs...)
-				}
-				c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
-					empty: empty,
-				}
-			}
-		}
-		return descs, nil
-	case images.MediaTypeDockerSchema2LayerGzip:
-		if c.pulledManifest == nil {
-			return nil, errors.New("manifest required for schema 1 blob pull")
-		}
-		return nil, c.fetchBlob(ctx, desc)
-	default:
-		return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType)
-	}
-}
-
-// ConvertOptions provides options on converting a docker schema1 manifest.
-type ConvertOptions struct {
-	// ManifestMediaType specifies the media type of the manifest OCI descriptor.
-	ManifestMediaType string
-
-	// ConfigMediaType specifies the media type of the manifest config OCI
-	// descriptor.
-	ConfigMediaType string
-}
-
-// ConvertOpt allows configuring a convert operation.
-type ConvertOpt func(context.Context, *ConvertOptions) error
-
-// UseDockerSchema2 is used to indicate that a schema1 manifest should be
-// converted into the media types for a docker schema2 manifest.
-func UseDockerSchema2() ConvertOpt {
-	return func(ctx context.Context, o *ConvertOptions) error {
-		o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest
-		o.ConfigMediaType = images.MediaTypeDockerSchema2Config
-		return nil
-	}
-}
-
-// Convert a docker manifest to an OCI descriptor
-func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) {
-	co := ConvertOptions{
-		ManifestMediaType: ocispec.MediaTypeImageManifest,
-		ConfigMediaType: ocispec.MediaTypeImageConfig,
-	}
-	for _, opt := range opts {
-		if err := opt(ctx, &co); err != nil {
-			return ocispec.Descriptor{}, err
-		}
-	}
-
-	history, diffIDs, err := c.schema1ManifestHistory()
-	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed")
-	}
-
-	var img ocispec.Image
-	if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
-	}
-
-	img.History = history
-	img.RootFS = ocispec.RootFS{
-		Type: "layers",
-		DiffIDs: diffIDs,
-	}
-
-	b, err := json.MarshalIndent(img, "", " ")
-	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
-	}
-
-	config := ocispec.Descriptor{
-		MediaType: co.ConfigMediaType,
-		Digest: digest.Canonical.FromBytes(b),
-		Size: int64(len(b)),
-	}
-
-	layers := make([]ocispec.Descriptor, len(diffIDs))
-	for i, diffID := range diffIDs {
-		layers[i] = c.layerBlobs[diffID]
-	}
-
-	manifest := ocispec.Manifest{
-		Versioned: specs.Versioned{
-			SchemaVersion: 2,
-		},
-		Config: config,
-		Layers: layers,
-	}
-
-	mb, err := json.MarshalIndent(manifest, "", " ")
-	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
-	}
-
-	desc := ocispec.Descriptor{
-		MediaType: co.ManifestMediaType,
-		Digest: digest.Canonical.FromBytes(mb),
-		Size: int64(len(mb)),
-	}
-
-	labels := map[string]string{}
-	labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
-	for i, ch := range manifest.Layers {
-		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
-	}
-
-	ref := remotes.MakeRefKey(ctx, desc)
-	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest")
-	}
-
-	ref = remotes.MakeRefKey(ctx, config)
-	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config")
-	}
-
-	return desc, nil
-}
-
-// ReadStripSignature reads in a schema1 manifest and returns a byte array
-// with the "signatures" field stripped
-func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) {
-	b, err := ioutil.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB
-	if err != nil {
-		return nil, err
-	}
-
-	return stripSignature(b)
-}
-
-func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
-	log.G(ctx).Debug("fetch schema 1")
-
-	rc, err := c.fetcher.Fetch(ctx, desc)
-	if err != nil {
-		return err
-	}
-
-	b, err := ReadStripSignature(rc)
-	rc.Close()
-	if err != nil {
-		return err
-	}
-
-	var m manifest
-	if err := json.Unmarshal(b, &m); err != nil {
-		return err
-	}
-	if len(m.Manifests) != 0 || len(m.Layers) != 0 {
-		return errors.New("converter: expected schema1 document but found extra keys")
-	}
-	c.pulledManifest = &m
-
-	return nil
-}
-
-func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
-	log.G(ctx).Debug("fetch blob")
-
-	var (
-		ref = remotes.MakeRefKey(ctx, desc)
-		calc = newBlobStateCalculator()
-		compressMethod = compression.Gzip
-	)
-
-	// size may be unknown, set to zero for content ingest
-	ingestDesc := desc
-	if ingestDesc.Size == -1 {
-		ingestDesc.Size = 0
-	}
-
-	cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc))
-	if err != nil {
-		if !errdefs.IsAlreadyExists(err) {
-			return err
-		}
-
-		reuse, err := c.reuseLabelBlobState(ctx, desc)
-		if err != nil {
-			return err
-		}
-
-		if reuse {
-			return nil
-		}
-
-		ra, err := c.contentStore.ReaderAt(ctx, desc)
-		if err != nil {
-			return err
-		}
-		defer ra.Close()
-
-		r, err := compression.DecompressStream(content.NewReader(ra))
-		if err != nil {
-			return err
-		}
-
-		compressMethod = r.GetCompression()
-		_, err = io.Copy(calc, r)
-		r.Close()
-		if err != nil {
-			return err
-		}
-	} else {
-		defer cw.Close()
-
-		rc, err := c.fetcher.Fetch(ctx, desc)
-		if err != nil {
-			return err
-		}
-		defer rc.Close()
-
-		eg, _ := errgroup.WithContext(ctx)
-		pr, pw := io.Pipe()
-
-		eg.Go(func() error {
-			r, err := compression.DecompressStream(pr)
-			if err != nil {
-				return err
-			}
-
-			compressMethod = r.GetCompression()
-			_, err = io.Copy(calc, r)
-			r.Close()
-			pr.CloseWithError(err)
-			return err
-		})
-
-		eg.Go(func() error {
-			defer pw.Close()
-
-			return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest)
-		})
-
-		if err := eg.Wait(); err != nil {
-			return err
-		}
-	}
-
-	if desc.Size == -1 {
-		info, err := c.contentStore.Info(ctx, desc.Digest)
-		if err != nil {
-			return errors.Wrap(err, "failed to get blob info")
-		}
-		desc.Size = info.Size
-	}
-
-	if compressMethod == compression.Uncompressed {
-		log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob")
-		desc.MediaType = images.MediaTypeDockerSchema2Layer
-	}
-
-	state := calc.State()
-
-	cinfo := content.Info{
-		Digest: desc.Digest,
-		Labels: map[string]string{
-			"containerd.io/uncompressed": state.diffID.String(),
-			labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty),
-		},
-	}
-
-	if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
-		return errors.Wrap(err, "failed to update uncompressed label")
-	}
-
-	c.mu.Lock()
-	c.blobMap[desc.Digest] = state
-	c.layerBlobs[state.diffID] = desc
-	c.mu.Unlock()
-
-	return nil
-}
-
-func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
-	cinfo, err := c.contentStore.Info(ctx, desc.Digest)
-	if err != nil {
-		return false, errors.Wrap(err, "failed to get blob info")
-	}
-	desc.Size = cinfo.Size
-
-	diffID, ok := cinfo.Labels["containerd.io/uncompressed"]
-	if !ok {
-		return false, nil
-	}
-
-	emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer]
-	if !ok {
-		return false, nil
-	}
-
-	isEmpty, err := strconv.ParseBool(emptyVal)
-	if err != nil {
-		log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty)
-		return false, nil
-	}
-
-	bState := blobState{empty: isEmpty}
-
-	if bState.diffID, err = digest.Parse(diffID); err != nil {
-		log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID)
-		return false, nil
-	}
-
-	// NOTE: there is no need to read header to get compression method
-	// because there are only two kinds of methods.
-	if bState.diffID == desc.Digest {
-		desc.MediaType = images.MediaTypeDockerSchema2Layer
-	} else {
-		desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
-	}
-
-	c.mu.Lock()
-	c.blobMap[desc.Digest] = bState
-	c.layerBlobs[bState.diffID] = desc
-	c.mu.Unlock()
-	return true, nil
-}
-
-func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
-	if c.pulledManifest == nil {
-		return nil, nil, errors.New("missing schema 1 manifest for conversion")
-	}
-	m := *c.pulledManifest
-
-	if len(m.History) == 0 {
-		return nil, nil, errors.New("no history")
-	}
-
-	history := make([]ocispec.History, len(m.History))
-	diffIDs := []digest.Digest{}
-	for i := range m.History {
-		var h v1History
-		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
-			return nil, nil, errors.Wrap(err, "failed to unmarshal history")
-		}
-
-		blobSum := m.FSLayers[i].BlobSum
-
-		state := c.blobMap[blobSum]
-
-		history[len(history)-i-1] = ocispec.History{
-			Author: h.Author,
-			Comment: h.Comment,
-			Created: &h.Created,
-			CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "),
-			EmptyLayer: state.empty,
-		}
-
-		if !state.empty {
-			diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
- - } - } - - return history, diffIDs, nil -} - -type fsLayer struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type history struct { - V1Compatibility string `json:"v1Compatibility"` -} - -type manifest struct { - FSLayers []fsLayer `json:"fsLayers"` - History []history `json:"history"` - Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest - Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index -} - -type v1History struct { - Author string `json:"author,omitempty"` - Created time.Time `json:"created"` - Comment string `json:"comment,omitempty"` - ThrowAway *bool `json:"throwaway,omitempty"` - Size *int `json:"Size,omitempty"` // used before ThrowAway field - ContainerConfig struct { - Cmd []string `json:"Cmd,omitempty"` - } `json:"container_config,omitempty"` -} - -// isEmptyLayer returns whether the v1 compatibility history describes an -// empty layer. A return value of true indicates the layer is empty, -// however false does not indicate non-empty. -func isEmptyLayer(compatHistory []byte) (bool, error) { - var h v1History - if err := json.Unmarshal(compatHistory, &h); err != nil { - return false, err - } - - if h.ThrowAway != nil { - return *h.ThrowAway, nil - } - if h.Size != nil { - return *h.Size == 0, nil - } - - // If no `Size` or `throwaway` field is given, then - // it cannot be determined whether the layer is empty - // from the history, return false - return false, nil -} - -type signature struct { - Signatures []jsParsedSignature `json:"signatures"` -} - -type jsParsedSignature struct { - Protected string `json:"protected"` -} - -type protectedBlock struct { - Length int `json:"formatLength"` - Tail string `json:"formatTail"` -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func stripSignature(b []byte) ([]byte, error) { - var sig signature - if err := json.Unmarshal(b, &sig); err != nil { - return nil, err - } - if len(sig.Signatures) == 0 { - return nil, errors.New("no signatures") - } - pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) - if err != nil { - return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected) - } - - var protected protectedBlock - if err := json.Unmarshal(pb, &protected); err != nil { - return nil, err - } - - if protected.Length > len(b) { - return nil, errors.New("invalid protected length block") - } - - tail, err := joseBase64UrlDecode(protected.Tail) - if err != nil { - return nil, errors.Wrap(err, "invalid tail base 64 value") - } - - return append(b[:protected.Length], tail...), nil -} - -type blobStateCalculator struct { - empty bool - digester digest.Digester -} - -func newBlobStateCalculator() *blobStateCalculator { - return &blobStateCalculator{ - empty: true, - digester: digest.Canonical.Digester(), - } -} - -func (c *blobStateCalculator) Write(p []byte) (int, error) { - if c.empty { - for _, b := range p { - if b != 0x00 { - c.empty = false - break - } - } - } - return c.digester.Hash().Write(p) -} - -func (c *blobStateCalculator) State() blobState { - return blobState{ - empty: c.empty, - diffID: c.digester.Digest(), - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/scope.go deleted file mode 100644 index fe57f023d35..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "fmt" - "net/url" - "sort" - "strings" - - "github.com/containerd/containerd/reference" -) - -// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" -// for "host/foo/bar:baz". -// When push is true, both pull and push are added to the scope. -func RepositoryScope(refspec reference.Spec, push bool) (string, error) { - u, err := url.Parse("dummy://" + refspec.Locator) - if err != nil { - return "", err - } - s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" - if push { - s += ",push" - } - return s, nil -} - -// tokenScopesKey is used for the key for context.WithValue(). -// value: []string (e.g. {"registry:foo/bar:pull"}) -type tokenScopesKey struct{} - -// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. 
-func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { - s, err := RepositoryScope(refspec, push) - if err != nil { - return nil, err - } - return WithScope(ctx, s), nil -} - -// WithScope appends a custom registry auth scope to the context. -func WithScope(ctx context.Context, scope string) context.Context { - var scopes []string - if v := ctx.Value(tokenScopesKey{}); v != nil { - scopes = v.([]string) - scopes = append(scopes, scope) - } else { - scopes = []string{scope} - } - return context.WithValue(ctx, tokenScopesKey{}, scopes) -} - -// ContextWithAppendPullRepositoryScope is used to append repository pull -// scope into existing scopes indexed by the tokenScopesKey{}. -func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { - return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) -} - -// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. -func GetTokenScopes(ctx context.Context, common []string) []string { - var scopes []string - if x := ctx.Value(tokenScopesKey{}); x != nil { - scopes = append(scopes, x.([]string)...) - } - - scopes = append(scopes, common...) - sort.Strings(scopes) - - l := 0 - for idx := 1; idx < len(scopes); idx++ { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) - // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. - if scopes[l] == scopes[idx] { - continue - } - - l++ - scopes[l] = scopes[idx] - } - return scopes[:l+1] -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/status.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/status.go deleted file mode 100644 index 9751edac7f9..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/docker/status.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/moby/locker" - "github.com/pkg/errors" -) - -// Status of a content operation -type Status struct { - content.Status - - Committed bool - - // UploadUUID is used by the Docker registry to reference blob uploads - UploadUUID string -} - -// StatusTracker to track status of operations -type StatusTracker interface { - GetStatus(string) (Status, error) - SetStatus(string, Status) -} - -// StatusTrackLocker to track status of operations with lock -type StatusTrackLocker interface { - StatusTracker - Lock(string) - Unlock(string) -} - -type memoryStatusTracker struct { - statuses map[string]Status - m sync.Mutex - locker *locker.Locker -} - -// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory -func NewInMemoryTracker() StatusTrackLocker { - return &memoryStatusTracker{ - statuses: map[string]Status{}, - locker: locker.New(), - } -} - -func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { - t.m.Lock() - defer t.m.Unlock() - status, ok := t.statuses[ref] - if !ok { - return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref) - } - return status, nil -} - -func (t *memoryStatusTracker) SetStatus(ref string, status Status) { - t.m.Lock() - t.statuses[ref] = status - t.m.Unlock() -} - -func (t *memoryStatusTracker) Lock(ref string) { - t.locker.Lock(ref) -} - -func (t *memoryStatusTracker) Unlock(ref string) { - t.locker.Unlock(ref) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/errors/errors.go deleted file mode 100644 index 519dbac105f..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/errors/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package errors - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" -) - -var _ error = ErrUnexpectedStatus{} - -// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status -type ErrUnexpectedStatus struct { - Status string - StatusCode int - Body []byte - RequestURL, RequestMethod string -} - -func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("unexpected status: %s", e.Status) -} - -// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response -func NewUnexpectedStatusErr(resp *http.Response) error { - var b []byte - if resp.Body != nil { - b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - } - err := ErrUnexpectedStatus{ - Body: b, - Status: resp.Status, - StatusCode: resp.StatusCode, - RequestMethod: resp.Request.Method, - } - if resp.Request.URL != nil { - err.RequestURL = resp.Request.URL.String() - } - return err -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/handlers.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/handlers.go deleted file mode 100644 index 8f79c608e68..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/handlers.go +++ /dev/null @@ -1,331 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package remotes - -import ( - "context" - "fmt" - "io" - "strings" - "sync" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/platforms" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/semaphore" -) - -type refKeyPrefix struct{} - -// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing -// data in the content store from the FetchHandler. -// -// Used in `MakeRefKey` to determine what the key prefix should be. -func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { - var values map[string]string - if v := ctx.Value(refKeyPrefix{}); v != nil { - values = v.(map[string]string) - } else { - values = make(map[string]string) - } - - values[mediaType] = prefix - return context.WithValue(ctx, refKeyPrefix{}, values) -} - -// MakeRefKey returns a unique reference for the descriptor. This reference can be -// used to lookup ongoing processes related to the descriptor. This function -// may look to the context to namespace the reference appropriately. 
-func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { - key := desc.Digest.String() - if desc.Annotations != nil { - if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { - key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) - } - } - - if v := ctx.Value(refKeyPrefix{}); v != nil { - values := v.(map[string]string) - if prefix := values[desc.MediaType]; prefix != "" { - return prefix + "-" + key - } - } - - switch mt := desc.MediaType; { - case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: - return "manifest-" + key - case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: - return "index-" + key - case images.IsLayerType(mt): - return "layer-" + key - case images.IsKnownConfig(mt): - return "config-" + key - default: - log.G(ctx).Warnf("reference for unknown type: %s", mt) - return "unknown-" + key - } -} - -// FetchHandler returns a handler that will fetch all content into the ingester -// discovered in a call to Dispatch. Use with ChildrenHandler to do a full -// recursive fetch. -func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ - "digest": desc.Digest, - "mediatype": desc.MediaType, - "size": desc.Size, - })) - - switch desc.MediaType { - case images.MediaTypeDockerSchema1Manifest: - return nil, fmt.Errorf("%v not supported", desc.MediaType) - default: - err := fetch(ctx, ingester, fetcher, desc) - return nil, err - } - } -} - -func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch") - - cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) - if err != nil { - if errdefs.IsAlreadyExists(err) { - return nil - } - return err - } - defer cw.Close() - - ws, err := cw.Status() - if err != nil { - return err - } - - if desc.Size == 0 { - // most likely a poorly configured registry/web front end which responded with no - // Content-Length header; unable (not to mention useless) to commit a 0-length entry - // into the content store. Error out here otherwise the error sent back is confusing - return errors.Wrapf(errdefs.ErrInvalidArgument, "unable to fetch descriptor (%s) which reports content size of zero", desc.Digest) - } - if ws.Offset == desc.Size { - // If writer is already complete, commit and return - err := cw.Commit(ctx, desc.Size, desc.Digest) - if err != nil && !errdefs.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) - } - return nil - } - - rc, err := fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - defer rc.Close() - - return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) -} - -// PushHandler returns a handler that will push all content from the provider -// using a writer from the pusher. 
-func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ - "digest": desc.Digest, - "mediatype": desc.MediaType, - "size": desc.Size, - })) - - err := push(ctx, provider, pusher, desc) - return nil, err - } -} - -func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { - log.G(ctx).Debug("push") - - var ( - cw content.Writer - err error - ) - if cs, ok := pusher.(content.Ingester); ok { - cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) - } else { - cw, err = pusher.Push(ctx, desc) - } - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return err - } - - return nil - } - defer cw.Close() - - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return err - } - defer ra.Close() - - rd := io.NewSectionReader(ra, 0, desc.Size) - return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) -} - -// PushContent pushes content specified by the descriptor from the provider. -// -// Base handlers can be provided which will be called before any push specific -// handlers. -func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { - - var m sync.Mutex - manifestStack := []ocispec.Descriptor{} - - filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - m.Lock() - manifestStack = append(manifestStack, desc) - m.Unlock() - return nil, images.ErrStopHandler - default: - return nil, nil - } - }) - - pushHandler := PushHandler(pusher, store) - - platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) - - annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) - - var handler images.Handler = images.Handlers( - annotateHandler, - filterHandler, - pushHandler, - ) - if wrapper != nil { - handler = wrapper(handler) - } - - if err := images.Dispatch(ctx, handler, limiter, desc); err != nil { - return err - } - - // Iterate in reverse order as seen, parent always uploaded after child - for i := len(manifestStack) - 1; i >= 0; i-- { - _, err := pushHandler(ctx, manifestStack[i]) - if err != nil { - // TODO(estesp): until we have a more complete method for index push, we need to report - // missing dependencies in an index/manifest list by sensing the "400 Bad Request" - // as a marker for this problem - if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || - manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && - errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") { - return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry") - } - return err - } - } - - return nil -} - -// FilterManifestByPlatformHandler allows Handler to handle non-target -// platform's manifest and configuration data. 
-func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return nil, err - } - - // no platform information - if desc.Platform == nil || m == nil { - return children, nil - } - - var descs []ocispec.Descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - if m.Match(*desc.Platform) { - descs = children - } else { - for _, child := range children { - if child.MediaType == images.MediaTypeDockerSchema2Config || - child.MediaType == ocispec.MediaTypeImageConfig { - - descs = append(descs, child) - } - } - } - default: - descs = children - } - return descs, nil - } -} - -// annotateDistributionSourceHandler add distribution source label into -// annotation of config or blob descriptor. -func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - children, err := f(ctx, desc) - if err != nil { - return nil, err - } - - // only add distribution source for the config or blob data descriptor - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - default: - return children, nil - } - - for i := range children { - child := children[i] - - info, err := manager.Info(ctx, child.Digest) - if err != nil { - return nil, err - } - - for k, v := range info.Labels { - if !strings.HasPrefix(k, "containerd.io/distribution.source.") { - continue - } - - if child.Annotations == nil { - child.Annotations = map[string]string{} - } - child.Annotations[k] = v - } - - children[i] = child - } - return children, nil - } -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/remotes/resolver.go b/ui/wasm/vendor/github.com/containerd/containerd/remotes/resolver.go deleted file mode 100644 index 624b14f05d6..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/remotes/resolver.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package remotes - -import ( - "context" - "io" - - "github.com/containerd/containerd/content" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Resolver provides remotes based on a locator. -type Resolver interface { - // Resolve attempts to resolve the reference into a name and descriptor. - // - // The argument `ref` should be a scheme-less URI representing the remote. - // Structurally, it has a host and path. The "host" can be used to directly - // reference a specific host or be matched against a specific handler. - // - // The returned name should be used to identify the referenced entity. - // Dependending on the remote namespace, this may be immutable or mutable. 
- // While the name may differ from ref, it should itself be a valid ref. - // - // If the resolution fails, an error will be returned. - Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) - - // Fetcher returns a new fetcher for the provided reference. - // All content fetched from the returned fetcher will be - // from the namespace referred to by ref. - Fetcher(ctx context.Context, ref string) (Fetcher, error) - - // Pusher returns a new pusher for the provided reference - // The returned Pusher should satisfy content.Ingester and concurrent attempts - // to push the same blob using the Ingester API should result in ErrUnavailable. - Pusher(ctx context.Context, ref string) (Pusher, error) -} - -// Fetcher fetches content -type Fetcher interface { - // Fetch the resource identified by the descriptor. - Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) -} - -// Pusher pushes content -type Pusher interface { - // Push returns a content writer for the given resource identified - // by the descriptor. - Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) -} - -// FetcherFunc allows package users to implement a Fetcher with just a -// function. -type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) - -// Fetch content -func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - return fn(ctx, desc) -} - -// PusherFunc allows package users to implement a Pusher with just a -// function. -type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) - -// Push content -func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { - return fn(ctx, desc) -} diff --git a/ui/wasm/vendor/github.com/containerd/containerd/version/version.go b/ui/wasm/vendor/github.com/containerd/containerd/version/version.go deleted file mode 100644 index dda0ee93f6e..00000000000 --- a/ui/wasm/vendor/github.com/containerd/containerd/version/version.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package version - -import "runtime" - -var ( - // Package is filled at linking time - Package = "github.com/containerd/containerd" - - // Version holds the complete version number. Filled in at linking time. - Version = "1.5.9+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" - - // GoVersion is Go tree's version. 
- GoVersion = runtime.Version() -) diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/LICENSE b/ui/wasm/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96f2b0..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins <dave@davec.name> - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypass.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 792994785e3..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations.
-var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d68c4..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/common.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce94576..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("<nil>") - maxNewlineBytes = []byte("<max depth reached>\n") - maxShortBytes = []byte("<max>") - circularBytes = []byte("<already shown>") - circularShortBytes = []byte("<shown>") - invalidAngleBytes = []byte("<invalid reflect.Value>") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. - if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. 
- switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/config.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f3120..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. 
- ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) 
-} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/doc.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6f1e1..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when
-		diffing data structures in tests.
-
-	* ContinueOnMethod
-		Enables recursion into types after invoking error and Stringer interface
-		methods. Recursion after method invocation is disabled by default.
-
-	* SortKeys
-		Specifies map keys should be sorted before being printed. Use
-		this to have a more deterministic, diffable output. Note that
-		only native types (bool, int, uint, floats, uintptr and string)
-		and types which implement error or Stringer interfaces are
-		supported with other types sorted according to the
-		reflect.Value.String() output which guarantees display
-		stability. Natural map order is used by default.
-
-	* SpewKeys
-		Specifies that, as a last resort attempt, map keys should be
-		spewed to strings and sorted by those strings. This is only
-		considered if SortKeys is true.
-
-Dump Usage
-
-Simply call spew.Dump with a list of variables you want to dump:
-
-	spew.Dump(myVar1, myVar2, ...)
-
-You may also call spew.Fdump if you would prefer to output to an arbitrary
-io.Writer. For example, to dump to standard error:
-
-	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
-
-A third option is to call spew.Sdump to get the formatted output as a string:
-
-	str := spew.Sdump(myVar1, myVar2, ...)
-
-Sample Dump Output
-
-See the Dump example for details on the setup of the types and variables being
-shown here.
-
-	(main.Foo) {
-	 unexportedField: (*main.Bar)(0xf84002e210)({
-	  flag: (main.Flag) flagTwo,
-	  data: (uintptr) <nil>
-	 }),
-	 ExportedField: (map[interface {}]interface {}) (len=1) {
-	  (string) (len=3) "one": (bool) true
-	 }
-	}
-
-Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
-command as shown.
-	([]uint8) (len=32 cap=32) {
-	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
-	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
-	 00000020  31 32                                             |12|
-	}
-
-Custom Formatter
-
-Spew provides a custom formatter that implements the fmt.Formatter interface
-so that it integrates cleanly with standard fmt package printing functions. The
-formatter is useful for inline printing of smaller data types similar to the
-standard %v format specifier.
-
-The custom formatter only responds to the %v (most compact), %+v (adds pointer
-addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
-combinations. Any other verbs such as %x and %q will be sent to the
-standard fmt package for formatting. In addition, the custom formatter ignores
-the width and precision arguments (however they will still work on the format
-specifiers not handled by the custom formatter).
-
-Custom Formatter Usage
-
-The simplest way to make use of the spew custom formatter is to call one of the
-convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
-functions have syntax you are most likely already familiar with:
-
-	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-	spew.Println(myVar, myVar2)
-	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
-	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list of convenience functions.
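-
-As a purely illustrative sketch (the variable names here are hypothetical),
-the configuration options described above combine with these convenience
-functions; a ConfigState tuned for deterministic, diffable output in tests
-might look like:
-
-	cfg := spew.ConfigState{Indent: "\t", SortKeys: true, DisablePointerAddresses: true}
-	cfg.Dump(myVar1)
-	str := cfg.Sdump(myVar1, myVar2)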
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/dump.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89fc1f6..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/format.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7d7ac..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
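-	// Illustrative note: with the + flag set, the dereference chain collected
-	// above is printed between the indirection marker and the value, so a
-	// double pointer to a uint8 renders as <**>(0xf8400420d0->0xf8400420c8)5
-	// (see the Sample Formatter Output section in doc.go).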
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/spew.go b/ui/wasm/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e338825..00000000000 --- a/ui/wasm/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/LICENSE b/ui/wasm/vendor/github.com/devfile/api/v2/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go deleted file mode 100644 index 3d136af0cc9..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/commands.go +++ /dev/null @@ -1,189 +0,0 @@ -package v1alpha2 - -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// CommandType describes the type of command. -// Only one of the following command type may be specified. -// +kubebuilder:validation:Enum=Exec;Apply;Composite;Custom -type CommandType string - -const ( - ExecCommandType CommandType = "Exec" - ApplyCommandType CommandType = "Apply" - CompositeCommandType CommandType = "Composite" - CustomCommandType CommandType = "Custom" -) - -// CommandGroupKind describes the kind of command group. -// +kubebuilder:validation:Enum=build;run;test;debug;deploy -type CommandGroupKind string - -const ( - BuildCommandGroupKind CommandGroupKind = "build" - RunCommandGroupKind CommandGroupKind = "run" - TestCommandGroupKind CommandGroupKind = "test" - DebugCommandGroupKind CommandGroupKind = "debug" - DeployCommandGroupKind CommandGroupKind = "deploy" -) - -// +devfile:getter:generate -type CommandGroup struct { - // Kind of group the command is part of - Kind CommandGroupKind `json:"kind"` - - // +optional - // Identifies the default command for a given group kind - // +devfile:default:value=false - IsDefault *bool `json:"isDefault,omitempty"` -} - -type BaseCommand struct { - // +optional - // Defines the group this command is part of - Group *CommandGroup `json:"group,omitempty"` -} - -type LabeledCommand struct { - BaseCommand `json:",inline"` - - // +optional - // Optional label that provides a label for this command - // to be used in Editor UI menus for example - Label string `json:"label,omitempty"` -} - -type Command struct { - // Mandatory identifier that allows referencing - // this command in composite commands, from - // a parent, or in events. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Id string `json:"id"` - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - CommandUnion `json:",inline"` -} - -// +union -type CommandUnion struct { - // Type of devworkspace command - // +unionDiscriminator - // +optional - CommandType CommandType `json:"commandType,omitempty"` - - // CLI Command executed in an existing component container - // +optional - Exec *ExecCommand `json:"exec,omitempty"` - - // Command that consists in applying a given component definition, - // typically bound to a devworkspace event. - // - // For example, when an `apply` command is bound to a `preStart` event, - // and references a `container` component, it will start the container as a - // K8S initContainer in the devworkspace POD, unless the component has its - // `dedicatedPod` field set to `true`. - // - // When no `apply` command exist for a given component, - // it is assumed the component will be applied at devworkspace start - // by default, unless `deployByDefault` for that component is set to false. 
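-	//
-	// An illustrative devfile YAML snippet for such an apply command (the
-	// command id and component name here are hypothetical):
-	//
-	//   commands:
-	//     - id: deploy-image
-	//       apply:
-	//         component: outerloop-deploy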
- // +optional - Apply *ApplyCommand `json:"apply,omitempty"` - - // Composite command that allows executing several sub-commands - // either sequentially or concurrently - // +optional - Composite *CompositeCommand `json:"composite,omitempty"` - - // Custom command whose logic is implementation-dependant - // and should be provided by the user - // possibly through some dedicated plugin - // +optional - // +devfile:overrides:include:omit=true - Custom *CustomCommand `json:"custom,omitempty"` -} - -// +devfile:getter:generate -type ExecCommand struct { - LabeledCommand `json:",inline"` - - // The actual command-line string - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - CommandLine string `json:"commandLine"` - - // Describes component to which given action relates - // - Component string `json:"component"` - - // Working directory where the command should be executed - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - // +optional - WorkingDir string `json:"workingDir,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Optional list of environment variables that have to be set - // before running the command - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Whether the command is capable to reload itself when source code changes. - // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. 
- // - // Default value is `false` - // +devfile:default:value=false - HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` -} - -type ApplyCommand struct { - LabeledCommand `json:",inline"` - - // Describes component that will be applied - // - Component string `json:"component"` -} - -// +devfile:getter:generate -type CompositeCommand struct { - LabeledCommand `json:",inline"` - - // The commands that comprise this composite command - Commands []string `json:"commands,omitempty" patchStrategy:"replace"` - - // Indicates if the sub-commands should be executed concurrently - // +optional - // +devfile:default:value=false - Parallel *bool `json:"parallel,omitempty"` -} - -type CustomCommand struct { - LabeledCommand `json:",inline"` - - // Class of command that the associated implementation component - // should use to process this command with the appropriate logic - CommandClass string `json:"commandClass"` - - // Additional free-form configuration for this custom command - // that the implementation component will know how to use - // - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:EmbeddedResource - EmbeddedResource runtime.RawExtension `json:"embeddedResource"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go deleted file mode 100644 index c913ca26834..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_container.go +++ /dev/null @@ -1,122 +0,0 @@ -package v1alpha2 - -// Component that allows the developer to add a configured container into their devworkspace -type ContainerComponent struct { - BaseComponent `json:",inline"` - Container `json:",inline"` - Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Annotation specifies the annotations to be added to specific resources -type Annotation struct { - // +optional - // Annotations to be added to deployment - Deployment map[string]string `json:"deployment,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations to be added to service - Service map[string]string `json:"service,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// +devfile:getter:generate -type Container struct { - Image string `json:"image"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Environment variables used in this container. - // - // The following variables are reserved and cannot be overridden via env: - // - // - `$PROJECTS_ROOT` - // - // - `$PROJECT_SOURCE` - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations that should be added to specific resources for this container - Annotation *Annotation `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // List of volumes mounts that should be mounted is this container. - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - MemoryLimit string `json:"memoryLimit,omitempty"` - - // +optional - MemoryRequest string `json:"memoryRequest,omitempty"` - - // +optional - CpuLimit string `json:"cpuLimit,omitempty"` - - // +optional - CpuRequest string `json:"cpuRequest,omitempty"` - - // The command to run in the dockerimage component instead of the default one provided in the image. 
- // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Command []string `json:"command,omitempty" patchStrategy:"replace"` - - // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Toggles whether or not the project source code should - // be mounted in the component. - // - // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. - // +optional - MountSources *bool `json:"mountSources,omitempty"` - - // Optional specification of the path in the container where - // project sources should be transferred/mounted when `mountSources` is `true`. - // When omitted, the default value of /projects is used. - // +optional - // +kubebuilder:default=/projects - SourceMapping string `json:"sourceMapping,omitempty"` - - // Specify if a container should run in its own separated pod, - // instead of running as part of the main development environment pod. - // - // Default value is `false` - // +optional - // +devfile:default:value=false - DedicatedPod *bool `json:"dedicatedPod,omitempty"` -} - -//GetMountSources returns the value of the boolean property. If it's unset, the default value is true for all component types except plugins and components that set `dedicatedPod` to true. -func (in *Container) GetMountSources() bool { - if in.MountSources != nil { - return *in.MountSources - } else { - if in.GetDedicatedPod() { - return false - } - return true - } -} - -type EnvVar struct { - Name string `json:"name" yaml:"name"` - Value string `json:"value" yaml:"value"` -} - -// Volume that should be mounted to a component container -type VolumeMount struct { - // The volume mount name is the name of an existing `Volume` component. - // If several containers mount the same volume name - // then they will reuse the same volume and will be able to access to the same files. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // The path in the component container where the volume should be mounted. - // If not path is mentioned, default path is the is `/`. - // +optional - Path string `json:"path,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_contribution.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_contribution.go deleted file mode 100644 index 2ed69da191b..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_contribution.go +++ /dev/null @@ -1,21 +0,0 @@ -package v1alpha2 - -import attributes "github.com/devfile/api/v2/pkg/attributes" - -type ComponentContribution struct { - // Mandatory name that allows referencing the component - // from other elements (such as commands) or from an external - // devfile that may reference this component through a parent or a plugin. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - // Map of implementation-dependant free-form YAML attributes. 
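The `GetMountSources` accessor above encodes an easy-to-miss default: project sources are mounted unless the container runs in a dedicated pod, and an explicit `mountSources` always wins. A small sketch of the resulting behavior against the published module; the image name is illustrative.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func boolPtr(b bool) *bool { return &b } // local helper, not API

func main() {
	plain := v1alpha2.Container{Image: "golang"}
	dedicated := v1alpha2.Container{Image: "golang", DedicatedPod: boolPtr(true)}
	forced := v1alpha2.Container{Image: "golang", DedicatedPod: boolPtr(true), MountSources: boolPtr(true)}

	fmt.Println(plain.GetMountSources())     // true:  unset defaults to mounted
	fmt.Println(dedicated.GetMountSources()) // false: dedicatedPod flips the default
	fmt.Println(forced.GetMountSources())    // true:  an explicit value always wins
}
```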
- // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - // Reference to a remote Devfile or DevWorkspace resource to be included - // in the DevWorkspace. - PluginComponent `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go deleted file mode 100644 index 59ac2174a1d..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image.go +++ /dev/null @@ -1,46 +0,0 @@ -package v1alpha2 - -// ImageType describes the type of image. -// Only one of the following image type may be specified. -// +kubebuilder:validation:Enum=Dockerfile -type ImageType string - -const ( - DockerfileImageType ImageType = "Dockerfile" -) - -type BaseImage struct { -} - -// Component that allows the developer to build a runtime image for outerloop -type ImageComponent struct { - BaseComponent `json:",inline"` - Image `json:",inline"` -} - -type Image struct { - // Name of the image for the resulting outerloop build - ImageName string `json:"imageName"` - ImageUnion `json:",inline"` -} - -// +union -// +devfile:getter:generate -type ImageUnion struct { - // Type of image - // - // +unionDiscriminator - // +optional - ImageType ImageType `json:"imageType,omitempty"` - - // Allows specifying dockerfile type build - // +optional - Dockerfile *DockerfileImage `json:"dockerfile,omitempty"` - - // Defines if the image should be built during startup. - // - // Default value is `false` - // +optional - // +devfile:default:value=false - AutoBuild *bool `json:"autoBuild,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go deleted file mode 100644 index 87f43a83ca9..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_image_dockerfile.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1alpha2 - -// DockerfileSrcType describes the type of -// the src for the Dockerfile outerloop build. -// Only one of the following location type may be specified. -// +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git -type DockerfileSrcType string - -const ( - UriLikeDockerfileSrcType DockerfileSrcType = "Uri" - DevfileRegistryLikeDockerfileSrcType DockerfileSrcType = "DevfileRegistry" - GitLikeDockerfileSrcType DockerfileSrcType = "Git" -) - -// Dockerfile Image type to specify the outerloop build using a Dockerfile -type DockerfileImage struct { - BaseImage `json:",inline"` - DockerfileSrc `json:",inline"` - Dockerfile `json:",inline"` -} - -// +union -type DockerfileSrc struct { - // Type of Dockerfile src - // + - // +unionDiscriminator - // +optional - SrcType DockerfileSrcType `json:"srcType,omitempty"` - - // URI Reference of a Dockerfile. - // It can be a full URL or a relative URI from the current devfile as the base URI. 
- // +optional - Uri string `json:"uri,omitempty"` - - // Dockerfile's Devfile Registry source - // +optional - DevfileRegistry *DockerfileDevfileRegistrySource `json:"devfileRegistry,omitempty"` - - // Dockerfile's Git source - // +optional - Git *DockerfileGitProjectSource `json:"git,omitempty"` -} - -// +devfile:getter:generate -type Dockerfile struct { - // Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container - // +optional - BuildContext string `json:"buildContext,omitempty"` - - // The arguments to supply to the dockerfile build. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Specify if a privileged builder pod is required. - // - // Default value is `false` - // +optional - // +devfile:default:value=false - RootRequired *bool `json:"rootRequired,omitempty"` -} - -type DockerfileDevfileRegistrySource struct { - // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry - // required for the Dockerfile build will be downloaded for building the image. - Id string `json:"id"` - - // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. - // To ensure the Dockerfile gets resolved consistently in different environments, - // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` -} - -type DockerfileGitProjectSource struct { - // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be - // cloned for building the image. - GitProjectSource `json:",inline"` - - // Location of the Dockerfile in the Git repository when using git as Dockerfile src. - // Defaults to Dockerfile. - // +optional - FileLocation string `json:"fileLocation,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go deleted file mode 100644 index 941900a8596..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_kubernetes_like.go +++ /dev/null @@ -1,54 +0,0 @@ -package v1alpha2 - -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -// +kubebuilder:validation:Enum=Uri;Inlined -type K8sLikeComponentLocationType string - -const ( - UriK8sLikeComponentLocationType K8sLikeComponentLocationType = "Uri" - InlinedK8sLikeComponentLocationType K8sLikeComponentLocationType = "Inlined" -) - -// +union -type K8sLikeComponentLocation struct { - // Type of Kubernetes-like location - // + - // +unionDiscriminator - // +optional - LocationType K8sLikeComponentLocationType `json:"locationType,omitempty"` - - // Location in a file fetched from a uri. - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined manifest - // +optional - Inlined string `json:"inlined,omitempty"` -} - -// +devfile:getter:generate -type K8sLikeComponent struct { - BaseComponent `json:",inline"` - K8sLikeComponentLocation `json:",inline"` - - // Defines if the component should be deployed during startup. 
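Putting the image pieces together: a hedged sketch of an outerloop image built from a Dockerfile fetched by URI, using only the types deleted above. The image name and Dockerfile path are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func main() {
	// Dockerfile src by URI, relative to the devfile; BuildContext defaults
	// to ${PROJECT_SOURCE} when omitted, spelled out here for clarity.
	img := v1alpha2.ImageComponent{
		Image: v1alpha2.Image{
			ImageName: "quay.io/example/app:latest", // illustrative name
			ImageUnion: v1alpha2.ImageUnion{
				Dockerfile: &v1alpha2.DockerfileImage{
					DockerfileSrc: v1alpha2.DockerfileSrc{Uri: "docker/Dockerfile"},
					Dockerfile:    v1alpha2.Dockerfile{BuildContext: "${PROJECT_SOURCE}"},
				},
			},
		},
	}
	// Embedded structs promote their fields, so ImageName reads flat.
	fmt.Println(img.ImageName)
}
```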
- // - // Default value is `false` - // +optional - // +devfile:default:value=false - DeployByDefault *bool `json:"deployByDefault,omitempty"` - - Endpoints []Endpoint `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Component that allows partly importing Kubernetes resources into the devworkspace POD -type KubernetesComponent struct { - K8sLikeComponent `json:",inline"` -} - -// Component that allows partly importing Openshift resources into the devworkspace POD -type OpenshiftComponent struct { - K8sLikeComponent `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go deleted file mode 100644 index 68933dd7184..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_plugin.go +++ /dev/null @@ -1,7 +0,0 @@ -package v1alpha2 - -type PluginComponent struct { - BaseComponent `json:",inline"` - ImportReference `json:",inline"` - PluginOverrides `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go deleted file mode 100644 index c3bd6928039..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/component_volume.go +++ /dev/null @@ -1,21 +0,0 @@ -package v1alpha2 - -// Component that allows the developer to declare and configure a volume into their devworkspace -type VolumeComponent struct { - BaseComponent `json:",inline"` - Volume `json:",inline"` -} - -// Volume that should be mounted to a component container -// +devfile:getter:generate -type Volume struct { - // +optional - // Size of the volume - Size string `json:"size,omitempty"` - - // +optional - // Ephemeral volumes are not stored persistently across restarts. Defaults - // to false - // +devfile:default:value=false - Ephemeral *bool `json:"ephemeral,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go deleted file mode 100644 index 9356aa65522..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/components.go +++ /dev/null @@ -1,109 +0,0 @@ -package v1alpha2 - -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// ComponentType describes the type of component. -// Only one of the following component type may be specified. -// +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image;Plugin;Custom -type ComponentType string - -const ( - ContainerComponentType ComponentType = "Container" - KubernetesComponentType ComponentType = "Kubernetes" - OpenshiftComponentType ComponentType = "Openshift" - PluginComponentType ComponentType = "Plugin" - VolumeComponentType ComponentType = "Volume" - ImageComponentType ComponentType = "Image" - CustomComponentType ComponentType = "Custom" -) - -// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context -// to the devworkspace, in order to make working in it easier. 
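For the Kubernetes-like and volume components above, the same inline-embedding pattern applies. A short sketch of an inlined manifest deployed only on demand, next to a persistent volume; the manifest string is truncated for illustration.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func boolPtr(b bool) *bool { return &b } // local helper, not API

func main() {
	// An inlined Kubernetes manifest, deployed only on demand.
	k8s := v1alpha2.KubernetesComponent{
		K8sLikeComponent: v1alpha2.K8sLikeComponent{
			K8sLikeComponentLocation: v1alpha2.K8sLikeComponentLocation{
				Inlined: "apiVersion: v1\nkind: Service\n", // truncated, illustrative
			},
			DeployByDefault: boolPtr(false),
		},
	}

	// A 1Gi volume that survives restarts (ephemeral defaults to false anyway).
	vol := v1alpha2.VolumeComponent{
		Volume: v1alpha2.Volume{Size: "1Gi", Ephemeral: boolPtr(false)},
	}
	fmt.Println(k8s.Inlined != "", vol.Size)
}
```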
-type BaseComponent struct { -} - -//+k8s:openapi-gen=true -type Component struct { - // Mandatory name that allows referencing the component - // from other elements (such as commands) or from an external - // devfile that may reference this component through a parent or a plugin. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - ComponentUnion `json:",inline"` -} - -// +union -type ComponentUnion struct { - // Type of component - // - // +unionDiscriminator - // +optional - ComponentType ComponentType `json:"componentType,omitempty"` - - // Allows adding and configuring devworkspace-related containers - // +optional - Container *ContainerComponent `json:"container,omitempty"` - - // Allows importing into the devworkspace the Kubernetes resources - // defined in a given manifest. For example this allows reusing the Kubernetes - // definitions used to deploy some runtime components in production. - // - // +optional - Kubernetes *KubernetesComponent `json:"kubernetes,omitempty"` - - // Allows importing into the devworkspace the OpenShift resources - // defined in a given manifest. For example this allows reusing the OpenShift - // definitions used to deploy some runtime components in production. - // - // +optional - Openshift *OpenshiftComponent `json:"openshift,omitempty"` - - // Allows specifying the definition of a volume - // shared by several other components - // +optional - Volume *VolumeComponent `json:"volume,omitempty"` - - // Allows specifying the definition of an image for outer loop builds - // +optional - Image *ImageComponent `json:"image,omitempty"` - - // Allows importing a plugin. - // - // Plugins are mainly imported devfiles that contribute components, commands - // and events as a consistent single unit. 
They are defined in either YAML files - // following the devfile syntax, - // or as `DevWorkspaceTemplate` Kubernetes Custom Resources - // +optional - // +devfile:overrides:include:omitInPlugin=true - Plugin *PluginComponent `json:"plugin,omitempty"` - - // Custom component whose logic is implementation-dependant - // and should be provided by the user - // possibly through some dedicated controller - // +optional - // +devfile:overrides:include:omit=true - Custom *CustomComponent `json:"custom,omitempty"` -} - -type CustomComponent struct { - // Class of component that the associated implementation controller - // should use to process this command with the appropriate logic - ComponentClass string `json:"componentClass"` - - // Additional free-form configuration for this custom component - // that the implementation controller will know how to use - // - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:EmbeddedResource - EmbeddedResource runtime.RawExtension `json:"embeddedResource"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go deleted file mode 100644 index 4785ce11b03..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devfile.go +++ /dev/null @@ -1,14 +0,0 @@ -package v1alpha2 - -import ( - "github.com/devfile/api/v2/pkg/devfile" -) - -// Devfile describes the structure of a cloud-native devworkspace and development environment. -// +k8s:deepcopy-gen=false -// +devfile:jsonschema:generate:omitCustomUnionMembers=true,omitPluginUnionMembers=true,shortenEndpointNameLength=true -type Devfile struct { - devfile.DevfileHeader `json:",inline"` - - DevWorkspaceTemplateSpec `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go deleted file mode 100644 index 03731eb1e81..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_conversion.go +++ /dev/null @@ -1,4 +0,0 @@ -package v1alpha2 - -// Hub marks this type as a conversion hub. 
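With the component union in view, the `Devfile` type above is just a header plus the template spec defined a few hunks below. A hedged end-to-end sketch; it assumes `DevfileHeader` in `pkg/devfile` exposes a `SchemaVersion` string field, which is not visible in this hunk.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"github.com/devfile/api/v2/pkg/devfile"
)

func main() {
	d := v1alpha2.Devfile{
		DevfileHeader: devfile.DevfileHeader{SchemaVersion: "2.2.0"}, // field name assumed
		DevWorkspaceTemplateSpec: v1alpha2.DevWorkspaceTemplateSpec{
			DevWorkspaceTemplateSpecContent: v1alpha2.DevWorkspaceTemplateSpecContent{
				Components: []v1alpha2.Component{{
					Name: "runtime",
					ComponentUnion: v1alpha2.ComponentUnion{
						Container: &v1alpha2.ContainerComponent{
							Container: v1alpha2.Container{Image: "golang"},
						},
					},
				}},
			},
		},
	}
	// Embedded structs promote their fields, so header and content read flat.
	fmt.Println(d.SchemaVersion, len(d.Components))
}
```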
-func (*DevWorkspace) Hub() {} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go deleted file mode 100644 index c8a500a0822..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspace_types.go +++ /dev/null @@ -1,98 +0,0 @@ -package v1alpha2 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// DevWorkspaceSpec defines the desired state of DevWorkspace -type DevWorkspaceSpec struct { - Started bool `json:"started"` - RoutingClass string `json:"routingClass,omitempty"` - Template DevWorkspaceTemplateSpec `json:"template,omitempty"` - Contributions []ComponentContribution `json:"contributions,omitempty"` -} - -// DevWorkspaceStatus defines the observed state of DevWorkspace -type DevWorkspaceStatus struct { - // Id of the DevWorkspace - DevWorkspaceId string `json:"devworkspaceId"` - // Main URL for this DevWorkspace - MainUrl string `json:"mainUrl,omitempty"` - Phase DevWorkspacePhase `json:"phase,omitempty"` - // Conditions represent the latest available observations of an object's state - Conditions []DevWorkspaceCondition `json:"conditions,omitempty"` - // Message is a short user-readable message giving additional information - // about an object's state - Message string `json:"message,omitempty"` -} - -type DevWorkspacePhase string - -// Valid devworkspace Statuses -const ( - DevWorkspaceStatusStarting DevWorkspacePhase = "Starting" - DevWorkspaceStatusRunning DevWorkspacePhase = "Running" - DevWorkspaceStatusStopped DevWorkspacePhase = "Stopped" - DevWorkspaceStatusStopping DevWorkspacePhase = "Stopping" - DevWorkspaceStatusFailed DevWorkspacePhase = "Failed" - DevWorkspaceStatusError DevWorkspacePhase = "Error" -) - -// DevWorkspaceCondition contains details for the current condition of this devworkspace. -type DevWorkspaceCondition struct { - // Type is the type of the condition. - Type DevWorkspaceConditionType `json:"type"` - // Phase is the status of the condition. - // Can be True, False, Unknown. - Status corev1.ConditionStatus `json:"status"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` - // Unique, one-word, CamelCase reason for the condition's last transition. - Reason string `json:"reason,omitempty"` - // Human-readable message indicating details about last transition. 
- Message string `json:"message,omitempty"` -} - -// Types of conditions reported by devworkspace -type DevWorkspaceConditionType string - -const ( - DevWorkspaceComponentsReady DevWorkspaceConditionType = "ComponentsReady" - DevWorkspaceRoutingReady DevWorkspaceConditionType = "RoutingReady" - DevWorkspaceServiceAccountReady DevWorkspaceConditionType = "ServiceAccountReady" - DevWorkspaceReady DevWorkspaceConditionType = "Ready" - DevWorkspaceFailedStart DevWorkspaceConditionType = "FailedStart" - DevWorkspaceError DevWorkspaceConditionType = "Error" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DevWorkspace is the Schema for the devworkspaces API -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=devworkspaces,scope=Namespaced,shortName=dw -// +kubebuilder:printcolumn:name="DevWorkspace ID",type="string",JSONPath=".status.devworkspaceId",description="The devworkspace's unique id" -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The current devworkspace startup phase" -// +kubebuilder:printcolumn:name="Info",type="string",JSONPath=".status.message",description="Additional information about the devworkspace" -// +devfile:jsonschema:generate -// +kubebuilder:storageversion -type DevWorkspace struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec DevWorkspaceSpec `json:"spec,omitempty"` - Status DevWorkspaceStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DevWorkspaceList contains a list of DevWorkspace -type DevWorkspaceList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []DevWorkspace `json:"items"` -} - -func init() { - SchemeBuilder.Register(&DevWorkspace{}, &DevWorkspaceList{}) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go deleted file mode 100644 index fe889a1b4f8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_conversion.go +++ /dev/null @@ -1,4 +0,0 @@ -package v1alpha2 - -// Hub marks this type as a conversion hub. -func (*DevWorkspaceTemplate) Hub() {} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go deleted file mode 100644 index f398cc9dddd..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_spec.go +++ /dev/null @@ -1,79 +0,0 @@ -package v1alpha2 - -import attributes "github.com/devfile/api/v2/pkg/attributes" - -// Structure of the devworkspace. This is also the specification of a devworkspace template. -// +devfile:jsonschema:generate -type DevWorkspaceTemplateSpec struct { - // Parent devworkspace template - // +optional - Parent *Parent `json:"parent,omitempty"` - - DevWorkspaceTemplateSpecContent `json:",inline"` -} - -// +devfile:overrides:generate -type DevWorkspaceTemplateSpecContent struct { - // Map of key-value variables used for string replacement in the devfile. Values can be referenced via {{variable-key}} - // to replace the corresponding value in string fields in the devfile. 
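The `init()` above registers these types with the `SchemeBuilder` defined in register.go further down this diff. A short sketch of the registration pattern that relies on, assuming the usual `k8s.io/apimachinery` scheme API.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// init() registered DevWorkspace/DevWorkspaceList with SchemeBuilder;
	// AddToScheme replays that registration onto a concrete scheme.
	scheme := runtime.NewScheme()
	if err := v1alpha2.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v1alpha2.SchemeGroupVersion.WithKind("DevWorkspace"))) // true
}
```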
Replacement cannot be used for - // - // - schemaVersion, metadata, parent source - // - // - element identifiers, e.g. command id, component name, endpoint name, project name - // - // - references to identifiers, e.g. in events, a command's component, container's volume mount name - // - // - string enums, e.g. command group kind, endpoint exposure - // +optional - // +patchStrategy=merge - // +devfile:overrides:include:omitInPlugin=true,description=Overrides of variables encapsulated in a parent devfile. - Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +patchStrategy=merge - // +devfile:overrides:include:omitInPlugin=true,description=Overrides of attributes encapsulated in a parent devfile. - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` - - // List of the devworkspace components, such as editor and plugins, - // user-provided containers, or other types of components - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:overrides:include:description=Overrides of components encapsulated in a parent devfile or a plugin. - // +devfile:toplevellist - Components []Component `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Projects worked on in the devworkspace, containing names and sources locations - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:overrides:include:omitInPlugin=true,description=Overrides of projects encapsulated in a parent devfile. - // +devfile:toplevellist - Projects []Project `json:"projects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // StarterProjects is a project that can be used as a starting point when bootstrapping new projects - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:overrides:include:omitInPlugin=true,description=Overrides of starterProjects encapsulated in a parent devfile. - // +devfile:toplevellist - StarterProjects []StarterProject `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Predefined, ready-to-use, devworkspace-related commands - // +optional - // +patchMergeKey=id - // +patchStrategy=merge - // +devfile:overrides:include:description=Overrides of commands encapsulated in a parent devfile or a plugin. - // +devfile:toplevellist - Commands []Command `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` - - // Bindings of commands to events. - // Each command is referred-to by its name. 
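The `variables` mechanism above is purely declarative: substitution of `{{key}}` references happens in the devfile parser, not in these structs. A small sketch of how the two sides line up; names are illustrative.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func main() {
	spec := v1alpha2.DevWorkspaceTemplateSpecContent{
		Variables: map[string]string{"go-image": "golang"},
		Components: []v1alpha2.Component{{
			Name: "runtime",
			ComponentUnion: v1alpha2.ComponentUnion{
				Container: &v1alpha2.ContainerComponent{
					// Left verbatim here; the parser replaces {{go-image}}.
					Container: v1alpha2.Container{Image: "{{go-image}}"},
				},
			},
		}},
	}
	fmt.Println(spec.Variables["go-image"], spec.Components[0].Name)
}
```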
- // +optional - // +devfile:overrides:include:omit=true - Events *Events `json:"events,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go deleted file mode 100644 index e7c31813e0b..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/devworkspacetemplate_types.go +++ /dev/null @@ -1,31 +0,0 @@ -package v1alpha2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DevWorkspaceTemplate is the Schema for the devworkspacetemplates API -// +kubebuilder:resource:path=devworkspacetemplates,scope=Namespaced,shortName=dwt -// +devfile:jsonschema:generate -// +kubebuilder:storageversion -type DevWorkspaceTemplate struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec DevWorkspaceTemplateSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DevWorkspaceTemplateList contains a list of DevWorkspaceTemplate -type DevWorkspaceTemplateList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []DevWorkspaceTemplate `json:"items"` -} - -func init() { - SchemeBuilder.Register(&DevWorkspaceTemplate{}, &DevWorkspaceTemplateList{}) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go deleted file mode 100644 index e6e04397ec0..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group -// +k8s:deepcopy-gen=package,register -// +k8s:openapi-gen=true -// +groupName=workspace.devfile.io -// +devfile:jsonschema:version=2.2.0 -package v1alpha2 diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go deleted file mode 100644 index 92f7f9e14d1..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/endpoint.go +++ /dev/null @@ -1,121 +0,0 @@ -package v1alpha2 - -import "github.com/devfile/api/v2/pkg/attributes" - -// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. -// Only one of the following protocols may be specified: http, ws, tcp, udp. -// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp -type EndpointProtocol string - -const ( - // Endpoint will have `http` traffic, typically on a TCP connection. 
- // It will be automaticaly promoted to `https` when the `secure` field is set to `true` - HTTPEndpointProtocol EndpointProtocol = "http" - // Endpoint will have `https` traffic, typically on a TCP connection - HTTPSEndpointProtocol EndpointProtocol = "https" - // Endpoint will have `ws` traffic, typically on a TCP connection - // It will be automaticaly promoted to `wss` when the `secure` field is set to `true` - WSEndpointProtocol EndpointProtocol = "ws" - // Endpoint will have `wss` traffic, typically on a TCP connection - WSSEndpointProtocol EndpointProtocol = "wss" - // Endpoint will have traffic on a TCP connection, - // without specifying an application protocol - TCPEndpointProtocol EndpointProtocol = "tcp" - // Endpoint will have traffic on an UDP connection, - // without specifying an application protocol - UDPEndpointProtocol EndpointProtocol = "udp" -) - -// EndpointExposure describes the way an endpoint is exposed on the network. -// Only one of the following exposures may be specified: public, internal, none. -// +kubebuilder:validation:Enum=public;internal;none -type EndpointExposure string - -const ( - // Endpoint will be exposed on the public network, typically through - // a K8S ingress or an OpenShift route - PublicEndpointExposure EndpointExposure = "public" - // Endpoint will be exposed internally outside of the main devworkspace POD, - // typically by K8S services, to be consumed by other elements running - // on the same cloud internal network. - InternalEndpointExposure EndpointExposure = "internal" - // Endpoint will not be exposed and will only be accessible - // inside the main devworkspace POD, on a local address. - NoneEndpointExposure EndpointExposure = "none" -) - -// +devfile:getter:generate -type Endpoint struct { - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Port number to be used within the container component. The same port cannot - // be used by two different container components. - TargetPort int `json:"targetPort"` - - // Describes how the endpoint should be exposed on the network. - // - // - `public` means that the endpoint will be exposed on the public network, typically through - // a K8S ingress or an OpenShift route. - // - // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, - // typically by K8S services, to be consumed by other elements running - // on the same cloud internal network. - // - // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main devworkspace POD, on a local address. - // - // Default value is `public` - // +optional - // +kubebuilder:default=public - Exposure EndpointExposure `json:"exposure,omitempty"` - - // Describes the application and transport protocols of the traffic that will go through this endpoint. - // - // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. - // - // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. - // - // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. - // - // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. 
- // - // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. - // - // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. - // - // Default value is `http` - // +optional - // +kubebuilder:default=http - Protocol EndpointProtocol `json:"protocol,omitempty"` - - // Describes whether the endpoint should be secured and protected by some - // authentication process. This requires a protocol of `https` or `wss`. - // +optional - // +devfile:default:value=false - Secure *bool `json:"secure,omitempty"` - - // Path of the endpoint URL - // +optional - Path string `json:"path,omitempty"` - - // Map of implementation-dependant string-based free-form attributes. - // - // Examples of Che-specific attributes: - // - // - cookiesAuthEnabled: "true" / "false", - // - // - type: "terminal" / "ide" / "ide-dev", - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // +optional - // Annotations to be added to Kubernetes Ingress or Openshift Route - Annotations map[string]string `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go deleted file mode 100644 index 2a8bd91a584..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/events.go +++ /dev/null @@ -1,26 +0,0 @@ -package v1alpha2 - -type Events struct { - DevWorkspaceEvents `json:",inline"` -} - -type DevWorkspaceEvents struct { - // IDs of commands that should be executed before the devworkspace start. - // Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD. - // +optional - PreStart []string `json:"preStart,omitempty"` - - // IDs of commands that should be executed after the devworkspace is completely started. - // In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. - // This means that those commands are not triggered until the user opens the IDE in his browser. - // +optional - PostStart []string `json:"postStart,omitempty"` - - // +optional - // IDs of commands that should be executed before stopping the devworkspace. - PreStop []string `json:"preStop,omitempty"` - - // +optional - // IDs of commands that should be executed after stopping the devworkspace. - PostStop []string `json:"postStop,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go deleted file mode 100644 index 65414c10aa9..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/import_reference.go +++ /dev/null @@ -1,61 +0,0 @@ -package v1alpha2 - -// ImportReferenceType describes the type of location -// from where the referenced template structure should be retrieved. -// Only one of the following parent locations may be specified. 
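Tying the endpoint enums and struct together: a sketch of a secured public endpoint. Per the comments above, `secure` requires an `https` or `wss` protocol, and `exposure`/`protocol` fall back to `public`/`http` when omitted.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func boolPtr(b bool) *bool { return &b } // local helper, not API

func main() {
	ep := v1alpha2.Endpoint{
		Name:       "web",
		TargetPort: 8080,
		Exposure:   v1alpha2.PublicEndpointExposure,
		Protocol:   v1alpha2.HTTPSEndpointProtocol, // required for secure=true
		Secure:     boolPtr(true),
		Path:       "/",
	}
	fmt.Println(ep.Name, ep.TargetPort)
}
```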
-// +kubebuilder:validation:Enum=Uri;Id;Kubernetes -type ImportReferenceType string - -const ( - UriImportReferenceType ImportReferenceType = "Uri" - IdImportReferenceType ImportReferenceType = "Id" - KubernetesImportReferenceType ImportReferenceType = "Kubernetes" -) - -// Location from where the an import reference is retrieved -// +union -type ImportReferenceUnion struct { - // type of location from where the referenced template structure should be retrieved - // + - // +unionDiscriminator - // +optional - ImportReferenceType ImportReferenceType `json:"importReferenceType,omitempty"` - - // URI Reference of a parent devfile YAML file. - // It can be a full URL or a relative URI with the current devfile as the base URI. - // +optional - Uri string `json:"uri,omitempty"` - - // Id in a registry that contains a Devfile yaml file - // +optional - Id string `json:"id,omitempty"` - - // Reference to a Kubernetes CRD of type DevWorkspaceTemplate - // +optional - Kubernetes *KubernetesCustomResourceImportReference `json:"kubernetes,omitempty"` -} - -type KubernetesCustomResourceImportReference struct { - Name string `json:"name"` - - // +optional - Namespace string `json:"namespace,omitempty"` -} - -type ImportReference struct { - ImportReferenceUnion `json:",inline"` - - // Registry URL to pull the parent devfile from when using id in the parent reference. - // To ensure the parent devfile gets resolved consistently in different environments, - // it is recommended to always specify the `registryUrl` when `id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` - - // Specific stack/sample version to pull the parent devfile from, when using id in the parent reference. - // To specify `version`, `id` must be defined and used as the import reference source. - // `version` can be either a specific stack version, or `latest`. - // If no `version` specified, default version will be used. - // +optional - // +kubebuilder:validation:Pattern=^(latest)|(([1-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?)$ - Version string `json:"version,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go deleted file mode 100644 index 81182efc7eb..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed.go +++ /dev/null @@ -1,51 +0,0 @@ -package v1alpha2 - -// Keyed is expected to be implemented by the elements of the devfile top-level lists -// (such as Command, Component, Project, ...). -// -// The Keys of list objects will typically be used to merge the top-level lists -// according to strategic merge patch rules, during parent or plugin overriding. -// +k8s:deepcopy-gen=false -type Keyed interface { - // Key is a string that allows uniquely identifying the object, - // especially in the Devfile top-level lists that are map-like K8S-compatible lists. - Key() string -} - -// KeyedList is a list of object that are uniquely identified by a Key -// The devfile top-level list (such as Commands, Components, Projects, ...) -// are examples of such lists of Keyed objects -// +k8s:deepcopy-gen=false -type KeyedList []Keyed - -// GetKeys converts a KeyedList into a slice of string by calling Key() on each -// element in the list. 
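A sketch of an `ImportReference` as consumed by parents and plugins: a registry id plus the recommended explicit `registryUrl`, with `version` pinned to `latest`. The stack id and registry URL are illustrative.

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func main() {
	ref := v1alpha2.ImportReference{
		ImportReferenceUnion: v1alpha2.ImportReferenceUnion{Id: "go"},
		RegistryUrl:          "https://registry.devfile.io", // illustrative URL
		Version:              "latest",                      // or a pinned semver
	}
	fmt.Println(ref.Id, ref.Version)
}
```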
-func (l KeyedList) GetKeys() []string { - var res []string - for _, keyed := range l { - res = append(res, keyed.Key()) - } - return res -} - -// TopLevelLists is a map that contains several Devfile top-level lists -// (such as `Commands`, `Components`, `Projects`, ...), available as `KeyedList`s. -// -// Each key of this map is the name of the field that contains the given top-level list: -// `Commands`, `Components`, etc... -// +k8s:deepcopy-gen=false -type TopLevelLists map[string]KeyedList - -// TopLevelListContainer is an interface that allows retrieving the devfile top-level lists -// from an object. -// Main implementor of this interface will be the `DevWorkspaceTemplateSpecContent`, which -// will return all its devfile top-level lists. -// -// But this will also be implemented by `Overrides` which may return less top-level lists -// the `DevWorkspaceTemplateSpecContent`, according to the top-level lists they can override. -// `PluginOverride` will not return the `Projects` and `StarterProjects` list, since plugins are -// not expected to override `projects` or `starterProjects` -// +k8s:deepcopy-gen=false -type TopLevelListContainer interface { - GetToplevelLists() TopLevelLists -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go deleted file mode 100644 index 5121f4ff139..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/keyed_implementations.go +++ /dev/null @@ -1,41 +0,0 @@ -package v1alpha2 - -import ( - "fmt" - "reflect" -) - -func extractKeys(keyedList interface{}) []Keyed { - value := reflect.ValueOf(keyedList) - keys := make([]Keyed, 0, value.Len()) - for i := 0; i < value.Len(); i++ { - elem := value.Index(i) - if elem.CanInterface() { - i := elem.Interface() - if keyed, ok := i.(Keyed); ok { - keys = append(keys, keyed) - } - } - } - return keys -} - -// CheckDuplicateKeys checks if duplicate keys are present in the devfile objects -func CheckDuplicateKeys(keyedList interface{}) error { - seen := map[string]bool{} - value := reflect.ValueOf(keyedList) - for i := 0; i < value.Len(); i++ { - elem := value.Index(i) - if elem.CanInterface() { - i := elem.Interface() - if keyed, ok := i.(Keyed); ok { - key := keyed.Key() - if seen[key] { - return fmt.Errorf("duplicate key: %s", key) - } - seen[key] = true - } - } - } - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go deleted file mode 100644 index 280ea367c10..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/override_directives.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1alpha2 - -// +kubebuilder:validation:Enum=replace;delete -type OverridingPatchDirective string - -const ( - ReplaceOverridingDirective OverridingPatchDirective = "replace" - DeleteOverridingDirective OverridingPatchDirective = "delete" -) - -const ( - DeleteFromPrimitiveListOverridingPatchDirective OverridingPatchDirective = "replace" -) - -type OverrideDirective struct { - // Path of the element the directive should be applied on - // - // For the following path tree: - // - // ```json - // commands: - // - exec - // id: commandId - // ``` - // - // the path would be: `commands["commandId"]`. 
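`CheckDuplicateKeys` above reflects over any slice whose elements implement `Keyed`. A sketch, assuming the generated `Key()` implementation for `Command` (returning its id) that lives elsewhere in this package, outside the hunks shown here:

```go
package main

import (
	"fmt"

	"github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
)

func main() {
	// Two commands sharing an id should be rejected during validation.
	cmds := []v1alpha2.Command{{Id: "build"}, {Id: "build"}}
	if err := v1alpha2.CheckDuplicateKeys(cmds); err != nil {
		fmt.Println(err) // duplicate key: build
	}
}
```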
- Path string `json:"path"` - - // `$Patch` directlive as defined in - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#basic-patch-format - // - // This is an enumeration that allows the following values: - // - // - *replace*: indicates that the element matched by the `jsonPath` field should be replaced instead of being merged. - // - // - *delete*: indicates that the element matched by the `jsonPath` field should be deleted. - // - // +optional - Patch OverridingPatchDirective `json:"patch,omitempty"` - - // `DeleteFromPrimitiveList` directive as defined in - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#deletefromprimitivelist-directive - // - // This indicates that the elements in this list should be deleted from the original primitive list. - // The original primitive list is the element matched by the `jsonPath` field. - // +optional - DeleteFromPrimitiveList []string `json:"deleteFromPrimitiveList,omitempty"` - - // `SetElementOrder` directive as defined in - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#deletefromprimitivelist-directive - // - // This provides a way to specify the order of a list. The relative order specified in this directive will be retained. - // The list whose order is controller is the element matched by the `jsonPath` field. - // If the controller list is a list of objects, then the values in this list should be - // the merge keys of the objects to order. - // +optional - SetElementOrder []string `json:"setElementOrder,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go deleted file mode 100644 index 69ad5b6e3c1..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/overrides.go +++ /dev/null @@ -1,11 +0,0 @@ -package v1alpha2 - -// +k8s:deepcopy-gen=false -type Overrides interface { - TopLevelListContainer - isOverride() -} - -// OverridesBase is used in the Overrides generator in order to provide a common base for the generated Overrides -// So please be careful when renaming -type OverridesBase struct{} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go deleted file mode 100644 index 69ebaa6b2c6..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/parent.go +++ /dev/null @@ -1,6 +0,0 @@ -package v1alpha2 - -type Parent struct { - ImportReference `json:",inline"` - ParentOverrides `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go deleted file mode 100644 index 9348d4d685f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/projects.go +++ /dev/null @@ -1,129 +0,0 @@ -package v1alpha2 - -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -type Project struct { - // Project name - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML 
attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. - // +optional - ClonePath string `json:"clonePath,omitempty"` - - ProjectSource `json:",inline"` -} - -type StarterProject struct { - // Project name - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // Description of a starter project - // +optional - Description string `json:"description,omitempty"` - - // Sub-directory from a starter project to be used as root for starter project. - // +optional - SubDir string `json:"subDir,omitempty"` - - ProjectSource `json:",inline"` -} - -// ProjectSourceType describes the type of Project sources. -// Only one of the following project sources may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -// +kubebuilder:validation:Enum=Git;Zip;Custom -type ProjectSourceType string - -const ( - GitProjectSourceType ProjectSourceType = "Git" - ZipProjectSourceType ProjectSourceType = "Zip" - CustomProjectSourceType ProjectSourceType = "Custom" -) - -// +union -type ProjectSource struct { - // Type of project source - // + - // +unionDiscriminator - // +optional - SourceType ProjectSourceType `json:"sourceType,omitempty"` - - // Project's Git source - // +optional - Git *GitProjectSource `json:"git,omitempty"` - - // Project's Zip source - // +optional - Zip *ZipProjectSource `json:"zip,omitempty"` - - // Project's Custom source - // +optional - // +devfile:overrides:include:omit=true - Custom *CustomProjectSource `json:"custom,omitempty"` -} - -type CommonProjectSource struct { -} - -type CustomProjectSource struct { - ProjectSourceClass string `json:"projectSourceClass"` - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:EmbeddedResource - EmbeddedResource runtime.RawExtension `json:"embeddedResource"` -} - -type ZipProjectSource struct { - CommonProjectSource `json:",inline"` - - // Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH - // +required - Location string `json:"location,omitempty"` -} - -type GitLikeProjectSource struct { - CommonProjectSource `json:",inline"` - - // Defines from what the project should be checked out. Required if there are more than one remote configured - // +optional - CheckoutFrom *CheckoutFrom `json:"checkoutFrom,omitempty"` - - // The remotes map which should be initialized in the git project. - // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. - Remotes map[string]string `json:"remotes"` -} - -type CheckoutFrom struct { - // The revision to checkout from. Should be branch name, tag or commit id. 
- // Default branch is used if missing or specified revision is not found. - // +optional - Revision string `json:"revision,omitempty"` - // The remote name should be used as init. Required if there are more than one remote configured - // +optional - Remote string `json:"remote,omitempty"` -} - -type GitProjectSource struct { - GitLikeProjectSource `json:",inline"` -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go deleted file mode 100644 index e83c8de584c..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/register.go +++ /dev/null @@ -1,22 +0,0 @@ -// NOTE: Boilerplate only. Ignore this file. - -// Package v1alpha2 contains API Schema definitions for the org v1alpha2 API group -// +k8s:deepcopy-gen=package,register -// +groupName=workspace.devfile.io -package v1alpha2 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "workspace.devfile.io", Version: "v1alpha2"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go deleted file mode 100644 index 34fb138c9ac..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union.go +++ /dev/null @@ -1,21 +0,0 @@ -package v1alpha2 - -// +k8s:deepcopy-gen=false - -// Union is an interface that allows managing structs defined as -// Kubernetes unions with discriminators, according to the following KEP: -// https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190325-unions.md -type Union interface { - discriminator() *string - - // Normalize allows normalizing the union, according to the following rules: - // - When only one field of the union is set and no discriminator is set, set the discriminator according to the union value. - // - When several fields are set and a discrimnator is set, remove (== reset to zero value) all the values that do not match the discriminator. - // - When only one union value is set and it matches discriminator, just do nothing. - // - In other case, something is inconsistent or ambiguous: an error is thrown. - Normalize() error - - // Simplify allows removing the union discriminator, - // but only after normalizing it if necessary. 
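The `Normalize` contract documented above is easier to see on a toy union than through the reflection helpers that implement it. A self-contained sketch mirroring the documented rules; this is not the package's generated code.

```go
package main

import (
	"errors"
	"fmt"
)

// toyUnion mirrors the pattern: one pointer member per variant
// plus a string discriminator.
type toyUnion struct {
	Kind string // discriminator
	Git  *struct{}
	Zip  *struct{}
}

// Normalize applies the documented rules: deduce a missing discriminator
// from a single set member; with a discriminator set, zero out members
// that do not match it; error on ambiguity.
func (u *toyUnion) Normalize() error {
	count, only := 0, ""
	if u.Git != nil {
		count, only = count+1, "Git"
	}
	if u.Zip != nil {
		count, only = count+1, "Zip"
	}
	switch {
	case u.Kind == "" && count == 1:
		u.Kind = only
	case u.Kind != "":
		if u.Kind != "Git" {
			u.Git = nil
		}
		if u.Kind != "Zip" {
			u.Zip = nil
		}
	case count > 1:
		return errors.New("ambiguous union: several members set, no discriminator")
	}
	return nil
}

func main() {
	u := toyUnion{Git: &struct{}{}}
	if err := u.Normalize(); err == nil {
		fmt.Println(u.Kind) // Git
	}
}
```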
- Simplify() -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go deleted file mode 100644 index d82ba4f261f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/union_implementation.go +++ /dev/null @@ -1,103 +0,0 @@ -package v1alpha2 - -import ( - "errors" - "reflect" -) - -func visitUnion(union interface{}, visitor interface{}) (err error) { - visitorValue := reflect.ValueOf(visitor) - unionValue := reflect.ValueOf(union) - oneMemberPresent := false - typeOfVisitor := visitorValue.Type() - for i := 0; i < visitorValue.NumField(); i++ { - unionMemberToRead := typeOfVisitor.Field(i).Name - unionMember := unionValue.FieldByName(unionMemberToRead) - if !unionMember.IsZero() { - if oneMemberPresent { - err = errors.New("Only one element should be set in union: " + unionValue.Type().Name()) - return - } - oneMemberPresent = true - visitorFunction := visitorValue.Field(i) - if visitorFunction.IsNil() { - return - } - results := visitorFunction.Call([]reflect.Value{unionMember}) - if !results[0].IsNil() { - err = results[0].Interface().(error) - } - return - } - } - return -} - -func simplifyUnion(union Union, visitorType reflect.Type) { - normalizeUnion(union, visitorType) - *union.discriminator() = "" -} - -func normalizeUnion(union Union, visitorType reflect.Type) error { - err := updateDiscriminator(union, visitorType) - if err != nil { - return err - } - - err = cleanupValues(union, visitorType) - if err != nil { - return err - } - return nil -} - -func updateDiscriminator(union Union, visitorType reflect.Type) error { - unionValue := reflect.ValueOf(union) - - if union.discriminator() == nil { - return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name()) - } - - if *union.discriminator() != "" { - // Nothing to do - return nil - } - - oneMemberPresent := false - for i := 0; i < visitorType.NumField(); i++ { - unionMemberToRead := visitorType.Field(i).Name - unionMember := unionValue.Elem().FieldByName(unionMemberToRead) - if !unionMember.IsZero() { - if oneMemberPresent { - return errors.New("Discriminator cannot be deduced from 2 values in union: " + unionValue.Type().Name()) - } - oneMemberPresent = true - *(union.discriminator()) = unionMemberToRead - } - } - return nil -} - -func cleanupValues(union Union, visitorType reflect.Type) error { - unionValue := reflect.ValueOf(union) - - if union.discriminator() == nil { - return errors.New("Discriminator should not be 'nil' in union: " + unionValue.Type().Name()) - } - - if *union.discriminator() == "" { - // Nothing to do - return errors.New("Values cannot be cleaned up without a discriminator in union: " + unionValue.Type().Name()) - } - - for i := 0; i < visitorType.NumField(); i++ { - unionMemberToRead := visitorType.Field(i).Name - unionMember := unionValue.Elem().FieldByName(unionMemberToRead) - if !unionMember.IsZero() { - if unionMemberToRead != *union.discriminator() { - unionMember.Set(reflect.Zero(unionMember.Type())) - } - } - } - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go deleted file mode 100644 index 60ed5863840..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.deepcopy.go +++ 
/dev/null @@ -1,4046 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha2 - -import ( - "github.com/devfile/api/v2/pkg/attributes" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Annotation) DeepCopyInto(out *Annotation) { - *out = *in - if in.Deployment != nil { - in, out := &in.Deployment, &out.Deployment - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Annotation. -func (in *Annotation) DeepCopy() *Annotation { - if in == nil { - return nil - } - out := new(Annotation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AnnotationParentOverride) DeepCopyInto(out *AnnotationParentOverride) { - *out = *in - if in.Deployment != nil { - in, out := &in.Deployment, &out.Deployment - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationParentOverride. -func (in *AnnotationParentOverride) DeepCopy() *AnnotationParentOverride { - if in == nil { - return nil - } - out := new(AnnotationParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AnnotationPluginOverride) DeepCopyInto(out *AnnotationPluginOverride) { - *out = *in - if in.Deployment != nil { - in, out := &in.Deployment, &out.Deployment - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationPluginOverride. -func (in *AnnotationPluginOverride) DeepCopy() *AnnotationPluginOverride { - if in == nil { - return nil - } - out := new(AnnotationPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AnnotationPluginOverrideParentOverride) DeepCopyInto(out *AnnotationPluginOverrideParentOverride) { - *out = *in - if in.Deployment != nil { - in, out := &in.Deployment, &out.Deployment - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnnotationPluginOverrideParentOverride. 
-func (in *AnnotationPluginOverrideParentOverride) DeepCopy() *AnnotationPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(AnnotationPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ApplyCommand) DeepCopyInto(out *ApplyCommand) {
-	*out = *in
-	in.LabeledCommand.DeepCopyInto(&out.LabeledCommand)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommand.
-func (in *ApplyCommand) DeepCopy() *ApplyCommand {
-	if in == nil {
-		return nil
-	}
-	out := new(ApplyCommand)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ApplyCommandParentOverride) DeepCopyInto(out *ApplyCommandParentOverride) {
-	*out = *in
-	in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandParentOverride.
-func (in *ApplyCommandParentOverride) DeepCopy() *ApplyCommandParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ApplyCommandParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ApplyCommandPluginOverride) DeepCopyInto(out *ApplyCommandPluginOverride) {
-	*out = *in
-	in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandPluginOverride.
-func (in *ApplyCommandPluginOverride) DeepCopy() *ApplyCommandPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ApplyCommandPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ApplyCommandPluginOverrideParentOverride) DeepCopyInto(out *ApplyCommandPluginOverrideParentOverride) {
-	*out = *in
-	in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyCommandPluginOverrideParentOverride.
-func (in *ApplyCommandPluginOverrideParentOverride) DeepCopy() *ApplyCommandPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ApplyCommandPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseCommand) DeepCopyInto(out *BaseCommand) {
-	*out = *in
-	if in.Group != nil {
-		in, out := &in.Group, &out.Group
-		*out = new(CommandGroup)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommand.
-func (in *BaseCommand) DeepCopy() *BaseCommand {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseCommand)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseCommandParentOverride) DeepCopyInto(out *BaseCommandParentOverride) {
-	*out = *in
-	if in.Group != nil {
-		in, out := &in.Group, &out.Group
-		*out = new(CommandGroupParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandParentOverride.
-func (in *BaseCommandParentOverride) DeepCopy() *BaseCommandParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseCommandParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseCommandPluginOverride) DeepCopyInto(out *BaseCommandPluginOverride) {
-	*out = *in
-	if in.Group != nil {
-		in, out := &in.Group, &out.Group
-		*out = new(CommandGroupPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandPluginOverride.
-func (in *BaseCommandPluginOverride) DeepCopy() *BaseCommandPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseCommandPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseCommandPluginOverrideParentOverride) DeepCopyInto(out *BaseCommandPluginOverrideParentOverride) {
-	*out = *in
-	if in.Group != nil {
-		in, out := &in.Group, &out.Group
-		*out = new(CommandGroupPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseCommandPluginOverrideParentOverride.
-func (in *BaseCommandPluginOverrideParentOverride) DeepCopy() *BaseCommandPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseCommandPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseComponent) DeepCopyInto(out *BaseComponent) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponent.
-func (in *BaseComponent) DeepCopy() *BaseComponent {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseComponent)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseComponentParentOverride) DeepCopyInto(out *BaseComponentParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentParentOverride.
-func (in *BaseComponentParentOverride) DeepCopy() *BaseComponentParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseComponentParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseComponentPluginOverride) DeepCopyInto(out *BaseComponentPluginOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentPluginOverride.
-func (in *BaseComponentPluginOverride) DeepCopy() *BaseComponentPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseComponentPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseComponentPluginOverrideParentOverride) DeepCopyInto(out *BaseComponentPluginOverrideParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseComponentPluginOverrideParentOverride.
-func (in *BaseComponentPluginOverrideParentOverride) DeepCopy() *BaseComponentPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseComponentPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseImage) DeepCopyInto(out *BaseImage) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImage.
-func (in *BaseImage) DeepCopy() *BaseImage {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseImage)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseImageParentOverride) DeepCopyInto(out *BaseImageParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImageParentOverride.
-func (in *BaseImageParentOverride) DeepCopy() *BaseImageParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseImageParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseImagePluginOverride) DeepCopyInto(out *BaseImagePluginOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImagePluginOverride.
-func (in *BaseImagePluginOverride) DeepCopy() *BaseImagePluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseImagePluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *BaseImagePluginOverrideParentOverride) DeepCopyInto(out *BaseImagePluginOverrideParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BaseImagePluginOverrideParentOverride.
-func (in *BaseImagePluginOverrideParentOverride) DeepCopy() *BaseImagePluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(BaseImagePluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CheckoutFrom) DeepCopyInto(out *CheckoutFrom) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFrom.
-func (in *CheckoutFrom) DeepCopy() *CheckoutFrom {
-	if in == nil {
-		return nil
-	}
-	out := new(CheckoutFrom)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CheckoutFromParentOverride) DeepCopyInto(out *CheckoutFromParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromParentOverride.
-func (in *CheckoutFromParentOverride) DeepCopy() *CheckoutFromParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CheckoutFromParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CheckoutFromPluginOverride) DeepCopyInto(out *CheckoutFromPluginOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromPluginOverride.
-func (in *CheckoutFromPluginOverride) DeepCopy() *CheckoutFromPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CheckoutFromPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CheckoutFromPluginOverrideParentOverride) DeepCopyInto(out *CheckoutFromPluginOverrideParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CheckoutFromPluginOverrideParentOverride.
-func (in *CheckoutFromPluginOverrideParentOverride) DeepCopy() *CheckoutFromPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CheckoutFromPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Command) DeepCopyInto(out *Command) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.CommandUnion.DeepCopyInto(&out.CommandUnion)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Command.
-func (in *Command) DeepCopy() *Command {
-	if in == nil {
-		return nil
-	}
-	out := new(Command)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandGroup) DeepCopyInto(out *CommandGroup) {
-	*out = *in
-	if in.IsDefault != nil {
-		in, out := &in.IsDefault, &out.IsDefault
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroup.
-func (in *CommandGroup) DeepCopy() *CommandGroup {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandGroup)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandGroupParentOverride) DeepCopyInto(out *CommandGroupParentOverride) {
-	*out = *in
-	if in.IsDefault != nil {
-		in, out := &in.IsDefault, &out.IsDefault
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupParentOverride.
-func (in *CommandGroupParentOverride) DeepCopy() *CommandGroupParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandGroupParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandGroupPluginOverride) DeepCopyInto(out *CommandGroupPluginOverride) {
-	*out = *in
-	if in.IsDefault != nil {
-		in, out := &in.IsDefault, &out.IsDefault
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverride.
-func (in *CommandGroupPluginOverride) DeepCopy() *CommandGroupPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandGroupPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandGroupPluginOverrideParentOverride) DeepCopyInto(out *CommandGroupPluginOverrideParentOverride) {
-	*out = *in
-	if in.IsDefault != nil {
-		in, out := &in.IsDefault, &out.IsDefault
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandGroupPluginOverrideParentOverride.
-func (in *CommandGroupPluginOverrideParentOverride) DeepCopy() *CommandGroupPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandGroupPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandParentOverride) DeepCopyInto(out *CommandParentOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.CommandUnionParentOverride.DeepCopyInto(&out.CommandUnionParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandParentOverride.
-func (in *CommandParentOverride) DeepCopy() *CommandParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandPluginOverride) DeepCopyInto(out *CommandPluginOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.CommandUnionPluginOverride.DeepCopyInto(&out.CommandUnionPluginOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandPluginOverride.
-func (in *CommandPluginOverride) DeepCopy() *CommandPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandPluginOverrideParentOverride) DeepCopyInto(out *CommandPluginOverrideParentOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.CommandUnionPluginOverrideParentOverride.DeepCopyInto(&out.CommandUnionPluginOverrideParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandPluginOverrideParentOverride.
-func (in *CommandPluginOverrideParentOverride) DeepCopy() *CommandPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandUnion) DeepCopyInto(out *CommandUnion) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecCommand)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Apply != nil {
-		in, out := &in.Apply, &out.Apply
-		*out = new(ApplyCommand)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Composite != nil {
-		in, out := &in.Composite, &out.Composite
-		*out = new(CompositeCommand)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Custom != nil {
-		in, out := &in.Custom, &out.Custom
-		*out = new(CustomCommand)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnion.
-func (in *CommandUnion) DeepCopy() *CommandUnion {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandUnion)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandUnionParentOverride) DeepCopyInto(out *CommandUnionParentOverride) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecCommandParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Apply != nil {
-		in, out := &in.Apply, &out.Apply
-		*out = new(ApplyCommandParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Composite != nil {
-		in, out := &in.Composite, &out.Composite
-		*out = new(CompositeCommandParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionParentOverride.
-func (in *CommandUnionParentOverride) DeepCopy() *CommandUnionParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandUnionParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandUnionPluginOverride) DeepCopyInto(out *CommandUnionPluginOverride) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecCommandPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Apply != nil {
-		in, out := &in.Apply, &out.Apply
-		*out = new(ApplyCommandPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Composite != nil {
-		in, out := &in.Composite, &out.Composite
-		*out = new(CompositeCommandPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionPluginOverride.
-func (in *CommandUnionPluginOverride) DeepCopy() *CommandUnionPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandUnionPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommandUnionPluginOverrideParentOverride) DeepCopyInto(out *CommandUnionPluginOverrideParentOverride) {
-	*out = *in
-	if in.Exec != nil {
-		in, out := &in.Exec, &out.Exec
-		*out = new(ExecCommandPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Apply != nil {
-		in, out := &in.Apply, &out.Apply
-		*out = new(ApplyCommandPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Composite != nil {
-		in, out := &in.Composite, &out.Composite
-		*out = new(CompositeCommandPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommandUnionPluginOverrideParentOverride.
-func (in *CommandUnionPluginOverrideParentOverride) DeepCopy() *CommandUnionPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommandUnionPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommonProjectSource) DeepCopyInto(out *CommonProjectSource) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSource.
-func (in *CommonProjectSource) DeepCopy() *CommonProjectSource {
-	if in == nil {
-		return nil
-	}
-	out := new(CommonProjectSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommonProjectSourceParentOverride) DeepCopyInto(out *CommonProjectSourceParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourceParentOverride.
-func (in *CommonProjectSourceParentOverride) DeepCopy() *CommonProjectSourceParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommonProjectSourceParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommonProjectSourcePluginOverride) DeepCopyInto(out *CommonProjectSourcePluginOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourcePluginOverride.
-func (in *CommonProjectSourcePluginOverride) DeepCopy() *CommonProjectSourcePluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommonProjectSourcePluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CommonProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *CommonProjectSourcePluginOverrideParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonProjectSourcePluginOverrideParentOverride.
-func (in *CommonProjectSourcePluginOverrideParentOverride) DeepCopy() *CommonProjectSourcePluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CommonProjectSourcePluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Component) DeepCopyInto(out *Component) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.ComponentUnion.DeepCopyInto(&out.ComponentUnion)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component.
-func (in *Component) DeepCopy() *Component {
-	if in == nil {
-		return nil
-	}
-	out := new(Component)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentContribution) DeepCopyInto(out *ComponentContribution) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.PluginComponent.DeepCopyInto(&out.PluginComponent)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentContribution.
-func (in *ComponentContribution) DeepCopy() *ComponentContribution {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentContribution)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentParentOverride) DeepCopyInto(out *ComponentParentOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.ComponentUnionParentOverride.DeepCopyInto(&out.ComponentUnionParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentParentOverride.
-func (in *ComponentParentOverride) DeepCopy() *ComponentParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentPluginOverride) DeepCopyInto(out *ComponentPluginOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.ComponentUnionPluginOverride.DeepCopyInto(&out.ComponentUnionPluginOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentPluginOverride.
-func (in *ComponentPluginOverride) DeepCopy() *ComponentPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentPluginOverrideParentOverride) DeepCopyInto(out *ComponentPluginOverrideParentOverride) {
-	*out = *in
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	in.ComponentUnionPluginOverrideParentOverride.DeepCopyInto(&out.ComponentUnionPluginOverrideParentOverride)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentPluginOverrideParentOverride.
-func (in *ComponentPluginOverrideParentOverride) DeepCopy() *ComponentPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentUnion) DeepCopyInto(out *ComponentUnion) {
-	*out = *in
-	if in.Container != nil {
-		in, out := &in.Container, &out.Container
-		*out = new(ContainerComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Kubernetes != nil {
-		in, out := &in.Kubernetes, &out.Kubernetes
-		*out = new(KubernetesComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Openshift != nil {
-		in, out := &in.Openshift, &out.Openshift
-		*out = new(OpenshiftComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Volume != nil {
-		in, out := &in.Volume, &out.Volume
-		*out = new(VolumeComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Image != nil {
-		in, out := &in.Image, &out.Image
-		*out = new(ImageComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Plugin != nil {
-		in, out := &in.Plugin, &out.Plugin
-		*out = new(PluginComponent)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Custom != nil {
-		in, out := &in.Custom, &out.Custom
-		*out = new(CustomComponent)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnion.
-func (in *ComponentUnion) DeepCopy() *ComponentUnion {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentUnion)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentUnionParentOverride) DeepCopyInto(out *ComponentUnionParentOverride) {
-	*out = *in
-	if in.Container != nil {
-		in, out := &in.Container, &out.Container
-		*out = new(ContainerComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Kubernetes != nil {
-		in, out := &in.Kubernetes, &out.Kubernetes
-		*out = new(KubernetesComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Openshift != nil {
-		in, out := &in.Openshift, &out.Openshift
-		*out = new(OpenshiftComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Volume != nil {
-		in, out := &in.Volume, &out.Volume
-		*out = new(VolumeComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Image != nil {
-		in, out := &in.Image, &out.Image
-		*out = new(ImageComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Plugin != nil {
-		in, out := &in.Plugin, &out.Plugin
-		*out = new(PluginComponentParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionParentOverride.
-func (in *ComponentUnionParentOverride) DeepCopy() *ComponentUnionParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentUnionParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentUnionPluginOverride) DeepCopyInto(out *ComponentUnionPluginOverride) {
-	*out = *in
-	if in.Container != nil {
-		in, out := &in.Container, &out.Container
-		*out = new(ContainerComponentPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Kubernetes != nil {
-		in, out := &in.Kubernetes, &out.Kubernetes
-		*out = new(KubernetesComponentPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Openshift != nil {
-		in, out := &in.Openshift, &out.Openshift
-		*out = new(OpenshiftComponentPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Volume != nil {
-		in, out := &in.Volume, &out.Volume
-		*out = new(VolumeComponentPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Image != nil {
-		in, out := &in.Image, &out.Image
-		*out = new(ImageComponentPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionPluginOverride.
-func (in *ComponentUnionPluginOverride) DeepCopy() *ComponentUnionPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentUnionPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ComponentUnionPluginOverrideParentOverride) DeepCopyInto(out *ComponentUnionPluginOverrideParentOverride) {
-	*out = *in
-	if in.Container != nil {
-		in, out := &in.Container, &out.Container
-		*out = new(ContainerComponentPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Kubernetes != nil {
-		in, out := &in.Kubernetes, &out.Kubernetes
-		*out = new(KubernetesComponentPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Openshift != nil {
-		in, out := &in.Openshift, &out.Openshift
-		*out = new(OpenshiftComponentPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Volume != nil {
-		in, out := &in.Volume, &out.Volume
-		*out = new(VolumeComponentPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Image != nil {
-		in, out := &in.Image, &out.Image
-		*out = new(ImageComponentPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentUnionPluginOverrideParentOverride.
-func (in *ComponentUnionPluginOverrideParentOverride) DeepCopy() *ComponentUnionPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ComponentUnionPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CompositeCommand) DeepCopyInto(out *CompositeCommand) {
-	*out = *in
-	in.LabeledCommand.DeepCopyInto(&out.LabeledCommand)
-	if in.Commands != nil {
-		in, out := &in.Commands, &out.Commands
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Parallel != nil {
-		in, out := &in.Parallel, &out.Parallel
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommand.
-func (in *CompositeCommand) DeepCopy() *CompositeCommand {
-	if in == nil {
-		return nil
-	}
-	out := new(CompositeCommand)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CompositeCommandParentOverride) DeepCopyInto(out *CompositeCommandParentOverride) {
-	*out = *in
-	in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride)
-	if in.Commands != nil {
-		in, out := &in.Commands, &out.Commands
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Parallel != nil {
-		in, out := &in.Parallel, &out.Parallel
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandParentOverride.
-func (in *CompositeCommandParentOverride) DeepCopy() *CompositeCommandParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CompositeCommandParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CompositeCommandPluginOverride) DeepCopyInto(out *CompositeCommandPluginOverride) {
-	*out = *in
-	in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride)
-	if in.Commands != nil {
-		in, out := &in.Commands, &out.Commands
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Parallel != nil {
-		in, out := &in.Parallel, &out.Parallel
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverride.
-func (in *CompositeCommandPluginOverride) DeepCopy() *CompositeCommandPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CompositeCommandPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CompositeCommandPluginOverrideParentOverride) DeepCopyInto(out *CompositeCommandPluginOverrideParentOverride) {
-	*out = *in
-	in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride)
-	if in.Commands != nil {
-		in, out := &in.Commands, &out.Commands
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Parallel != nil {
-		in, out := &in.Parallel, &out.Parallel
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompositeCommandPluginOverrideParentOverride.
-func (in *CompositeCommandPluginOverrideParentOverride) DeepCopy() *CompositeCommandPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(CompositeCommandPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Container) DeepCopyInto(out *Container) {
-	*out = *in
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]EnvVar, len(*in))
-		copy(*out, *in)
-	}
-	if in.Annotation != nil {
-		in, out := &in.Annotation, &out.Annotation
-		*out = new(Annotation)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]VolumeMount, len(*in))
-		copy(*out, *in)
-	}
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.MountSources != nil {
-		in, out := &in.MountSources, &out.MountSources
-		*out = new(bool)
-		**out = **in
-	}
-	if in.DedicatedPod != nil {
-		in, out := &in.DedicatedPod, &out.DedicatedPod
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container.
-func (in *Container) DeepCopy() *Container {
-	if in == nil {
-		return nil
-	}
-	out := new(Container)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerComponent) DeepCopyInto(out *ContainerComponent) {
-	*out = *in
-	out.BaseComponent = in.BaseComponent
-	in.Container.DeepCopyInto(&out.Container)
-	if in.Endpoints != nil {
-		in, out := &in.Endpoints, &out.Endpoints
-		*out = make([]Endpoint, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponent.
-func (in *ContainerComponent) DeepCopy() *ContainerComponent {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerComponent)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerComponentParentOverride) DeepCopyInto(out *ContainerComponentParentOverride) {
-	*out = *in
-	out.BaseComponentParentOverride = in.BaseComponentParentOverride
-	in.ContainerParentOverride.DeepCopyInto(&out.ContainerParentOverride)
-	if in.Endpoints != nil {
-		in, out := &in.Endpoints, &out.Endpoints
-		*out = make([]EndpointParentOverride, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentParentOverride.
-func (in *ContainerComponentParentOverride) DeepCopy() *ContainerComponentParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerComponentParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerComponentPluginOverride) DeepCopyInto(out *ContainerComponentPluginOverride) {
-	*out = *in
-	out.BaseComponentPluginOverride = in.BaseComponentPluginOverride
-	in.ContainerPluginOverride.DeepCopyInto(&out.ContainerPluginOverride)
-	if in.Endpoints != nil {
-		in, out := &in.Endpoints, &out.Endpoints
-		*out = make([]EndpointPluginOverride, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentPluginOverride.
-func (in *ContainerComponentPluginOverride) DeepCopy() *ContainerComponentPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerComponentPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerComponentPluginOverrideParentOverride) DeepCopyInto(out *ContainerComponentPluginOverrideParentOverride) {
-	*out = *in
-	out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride
-	in.ContainerPluginOverrideParentOverride.DeepCopyInto(&out.ContainerPluginOverrideParentOverride)
-	if in.Endpoints != nil {
-		in, out := &in.Endpoints, &out.Endpoints
-		*out = make([]EndpointPluginOverrideParentOverride, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerComponentPluginOverrideParentOverride.
-func (in *ContainerComponentPluginOverrideParentOverride) DeepCopy() *ContainerComponentPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerComponentPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerParentOverride) DeepCopyInto(out *ContainerParentOverride) {
-	*out = *in
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]EnvVarParentOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Annotation != nil {
-		in, out := &in.Annotation, &out.Annotation
-		*out = new(AnnotationParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]VolumeMountParentOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.MountSources != nil {
-		in, out := &in.MountSources, &out.MountSources
-		*out = new(bool)
-		**out = **in
-	}
-	if in.DedicatedPod != nil {
-		in, out := &in.DedicatedPod, &out.DedicatedPod
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerParentOverride.
-func (in *ContainerParentOverride) DeepCopy() *ContainerParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerPluginOverride) DeepCopyInto(out *ContainerPluginOverride) {
-	*out = *in
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]EnvVarPluginOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Annotation != nil {
-		in, out := &in.Annotation, &out.Annotation
-		*out = new(AnnotationPluginOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]VolumeMountPluginOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.MountSources != nil {
-		in, out := &in.MountSources, &out.MountSources
-		*out = new(bool)
-		**out = **in
-	}
-	if in.DedicatedPod != nil {
-		in, out := &in.DedicatedPod, &out.DedicatedPod
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverride.
-func (in *ContainerPluginOverride) DeepCopy() *ContainerPluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerPluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ContainerPluginOverrideParentOverride) DeepCopyInto(out *ContainerPluginOverrideParentOverride) {
-	*out = *in
-	if in.Env != nil {
-		in, out := &in.Env, &out.Env
-		*out = make([]EnvVarPluginOverrideParentOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Annotation != nil {
-		in, out := &in.Annotation, &out.Annotation
-		*out = new(AnnotationPluginOverrideParentOverride)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.VolumeMounts != nil {
-		in, out := &in.VolumeMounts, &out.VolumeMounts
-		*out = make([]VolumeMountPluginOverrideParentOverride, len(*in))
-		copy(*out, *in)
-	}
-	if in.Command != nil {
-		in, out := &in.Command, &out.Command
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.MountSources != nil {
-		in, out := &in.MountSources, &out.MountSources
-		*out = new(bool)
-		**out = **in
-	}
-	if in.DedicatedPod != nil {
-		in, out := &in.DedicatedPod, &out.DedicatedPod
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPluginOverrideParentOverride.
-func (in *ContainerPluginOverrideParentOverride) DeepCopy() *ContainerPluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(ContainerPluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomCommand) DeepCopyInto(out *CustomCommand) {
-	*out = *in
-	in.LabeledCommand.DeepCopyInto(&out.LabeledCommand)
-	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomCommand.
-func (in *CustomCommand) DeepCopy() *CustomCommand {
-	if in == nil {
-		return nil
-	}
-	out := new(CustomCommand)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomComponent) DeepCopyInto(out *CustomComponent) {
-	*out = *in
-	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomComponent.
-func (in *CustomComponent) DeepCopy() *CustomComponent {
-	if in == nil {
-		return nil
-	}
-	out := new(CustomComponent)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomProjectSource) DeepCopyInto(out *CustomProjectSource) {
-	*out = *in
-	in.EmbeddedResource.DeepCopyInto(&out.EmbeddedResource)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomProjectSource.
-func (in *CustomProjectSource) DeepCopy() *CustomProjectSource {
-	if in == nil {
-		return nil
-	}
-	out := new(CustomProjectSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspace) DeepCopyInto(out *DevWorkspace) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspace.
-func (in *DevWorkspace) DeepCopy() *DevWorkspace {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspace)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DevWorkspace) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceCondition) DeepCopyInto(out *DevWorkspaceCondition) {
-	*out = *in
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceCondition.
-func (in *DevWorkspaceCondition) DeepCopy() *DevWorkspaceCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceEvents) DeepCopyInto(out *DevWorkspaceEvents) {
-	*out = *in
-	if in.PreStart != nil {
-		in, out := &in.PreStart, &out.PreStart
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PostStart != nil {
-		in, out := &in.PostStart, &out.PostStart
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PreStop != nil {
-		in, out := &in.PreStop, &out.PreStop
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.PostStop != nil {
-		in, out := &in.PostStop, &out.PostStop
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceEvents.
-func (in *DevWorkspaceEvents) DeepCopy() *DevWorkspaceEvents {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceEvents)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceList) DeepCopyInto(out *DevWorkspaceList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]DevWorkspace, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceList.
-func (in *DevWorkspaceList) DeepCopy() *DevWorkspaceList {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DevWorkspaceList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceSpec) DeepCopyInto(out *DevWorkspaceSpec) {
-	*out = *in
-	in.Template.DeepCopyInto(&out.Template)
-	if in.Contributions != nil {
-		in, out := &in.Contributions, &out.Contributions
-		*out = make([]ComponentContribution, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceSpec.
-func (in *DevWorkspaceSpec) DeepCopy() *DevWorkspaceSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceStatus) DeepCopyInto(out *DevWorkspaceStatus) {
-	*out = *in
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]DevWorkspaceCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceStatus.
-func (in *DevWorkspaceStatus) DeepCopy() *DevWorkspaceStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceTemplate) DeepCopyInto(out *DevWorkspaceTemplate) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplate.
-func (in *DevWorkspaceTemplate) DeepCopy() *DevWorkspaceTemplate {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceTemplate)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DevWorkspaceTemplate) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceTemplateList) DeepCopyInto(out *DevWorkspaceTemplateList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]DevWorkspaceTemplate, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateList.
-func (in *DevWorkspaceTemplateList) DeepCopy() *DevWorkspaceTemplateList {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceTemplateList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DevWorkspaceTemplateList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceTemplateSpec) DeepCopyInto(out *DevWorkspaceTemplateSpec) {
-	*out = *in
-	if in.Parent != nil {
-		in, out := &in.Parent, &out.Parent
-		*out = new(Parent)
-		(*in).DeepCopyInto(*out)
-	}
-	in.DevWorkspaceTemplateSpecContent.DeepCopyInto(&out.DevWorkspaceTemplateSpecContent)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateSpec.
-func (in *DevWorkspaceTemplateSpec) DeepCopy() *DevWorkspaceTemplateSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceTemplateSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DevWorkspaceTemplateSpecContent) DeepCopyInto(out *DevWorkspaceTemplateSpecContent) {
-	*out = *in
-	if in.Variables != nil {
-		in, out := &in.Variables, &out.Variables
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Attributes != nil {
-		in, out := &in.Attributes, &out.Attributes
-		*out = make(attributes.Attributes, len(*in))
-		for key, val := range *in {
-			(*out)[key] = *val.DeepCopy()
-		}
-	}
-	if in.Components != nil {
-		in, out := &in.Components, &out.Components
-		*out = make([]Component, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Projects != nil {
-		in, out := &in.Projects, &out.Projects
-		*out = make([]Project, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.StarterProjects != nil {
-		in, out := &in.StarterProjects, &out.StarterProjects
-		*out = make([]StarterProject, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Commands != nil {
-		in, out := &in.Commands, &out.Commands
-		*out = make([]Command, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Events != nil {
-		in, out := &in.Events, &out.Events
-		*out = new(Events)
-		(*in).DeepCopyInto(*out)
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevWorkspaceTemplateSpecContent.
-func (in *DevWorkspaceTemplateSpecContent) DeepCopy() *DevWorkspaceTemplateSpecContent {
-	if in == nil {
-		return nil
-	}
-	out := new(DevWorkspaceTemplateSpecContent)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Dockerfile) DeepCopyInto(out *Dockerfile) {
-	*out = *in
-	if in.Args != nil {
-		in, out := &in.Args, &out.Args
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.RootRequired != nil {
-		in, out := &in.RootRequired, &out.RootRequired
-		*out = new(bool)
-		**out = **in
-	}
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dockerfile.
-func (in *Dockerfile) DeepCopy() *Dockerfile {
-	if in == nil {
-		return nil
-	}
-	out := new(Dockerfile)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DockerfileDevfileRegistrySource) DeepCopyInto(out *DockerfileDevfileRegistrySource) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySource.
-func (in *DockerfileDevfileRegistrySource) DeepCopy() *DockerfileDevfileRegistrySource {
-	if in == nil {
-		return nil
-	}
-	out := new(DockerfileDevfileRegistrySource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DockerfileDevfileRegistrySourceParentOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourceParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourceParentOverride.
-func (in *DockerfileDevfileRegistrySourceParentOverride) DeepCopy() *DockerfileDevfileRegistrySourceParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(DockerfileDevfileRegistrySourceParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DockerfileDevfileRegistrySourcePluginOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourcePluginOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourcePluginOverride.
-func (in *DockerfileDevfileRegistrySourcePluginOverride) DeepCopy() *DockerfileDevfileRegistrySourcePluginOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(DockerfileDevfileRegistrySourcePluginOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) DeepCopyInto(out *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) {
-	*out = *in
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileDevfileRegistrySourcePluginOverrideParentOverride.
-func (in *DockerfileDevfileRegistrySourcePluginOverrideParentOverride) DeepCopy() *DockerfileDevfileRegistrySourcePluginOverrideParentOverride {
-	if in == nil {
-		return nil
-	}
-	out := new(DockerfileDevfileRegistrySourcePluginOverrideParentOverride)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DockerfileGitProjectSource) DeepCopyInto(out *DockerfileGitProjectSource) {
-	*out = *in
-	in.GitProjectSource.DeepCopyInto(&out.GitProjectSource)
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSource.
-func (in *DockerfileGitProjectSource) DeepCopy() *DockerfileGitProjectSource { - if in == nil { - return nil - } - out := new(DockerfileGitProjectSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileGitProjectSourceParentOverride) DeepCopyInto(out *DockerfileGitProjectSourceParentOverride) { - *out = *in - in.GitProjectSourceParentOverride.DeepCopyInto(&out.GitProjectSourceParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourceParentOverride. -func (in *DockerfileGitProjectSourceParentOverride) DeepCopy() *DockerfileGitProjectSourceParentOverride { - if in == nil { - return nil - } - out := new(DockerfileGitProjectSourceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileGitProjectSourcePluginOverride) DeepCopyInto(out *DockerfileGitProjectSourcePluginOverride) { - *out = *in - in.GitProjectSourcePluginOverride.DeepCopyInto(&out.GitProjectSourcePluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourcePluginOverride. -func (in *DockerfileGitProjectSourcePluginOverride) DeepCopy() *DockerfileGitProjectSourcePluginOverride { - if in == nil { - return nil - } - out := new(DockerfileGitProjectSourcePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileGitProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *DockerfileGitProjectSourcePluginOverrideParentOverride) { - *out = *in - in.GitProjectSourcePluginOverrideParentOverride.DeepCopyInto(&out.GitProjectSourcePluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileGitProjectSourcePluginOverrideParentOverride. -func (in *DockerfileGitProjectSourcePluginOverrideParentOverride) DeepCopy() *DockerfileGitProjectSourcePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(DockerfileGitProjectSourcePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileImage) DeepCopyInto(out *DockerfileImage) { - *out = *in - out.BaseImage = in.BaseImage - in.DockerfileSrc.DeepCopyInto(&out.DockerfileSrc) - in.Dockerfile.DeepCopyInto(&out.Dockerfile) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImage. -func (in *DockerfileImage) DeepCopy() *DockerfileImage { - if in == nil { - return nil - } - out := new(DockerfileImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerfileImageParentOverride) DeepCopyInto(out *DockerfileImageParentOverride) { - *out = *in - out.BaseImageParentOverride = in.BaseImageParentOverride - in.DockerfileSrcParentOverride.DeepCopyInto(&out.DockerfileSrcParentOverride) - in.DockerfileParentOverride.DeepCopyInto(&out.DockerfileParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImageParentOverride. -func (in *DockerfileImageParentOverride) DeepCopy() *DockerfileImageParentOverride { - if in == nil { - return nil - } - out := new(DockerfileImageParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileImagePluginOverride) DeepCopyInto(out *DockerfileImagePluginOverride) { - *out = *in - out.BaseImagePluginOverride = in.BaseImagePluginOverride - in.DockerfileSrcPluginOverride.DeepCopyInto(&out.DockerfileSrcPluginOverride) - in.DockerfilePluginOverride.DeepCopyInto(&out.DockerfilePluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImagePluginOverride. -func (in *DockerfileImagePluginOverride) DeepCopy() *DockerfileImagePluginOverride { - if in == nil { - return nil - } - out := new(DockerfileImagePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileImagePluginOverrideParentOverride) DeepCopyInto(out *DockerfileImagePluginOverrideParentOverride) { - *out = *in - out.BaseImagePluginOverrideParentOverride = in.BaseImagePluginOverrideParentOverride - in.DockerfileSrcPluginOverrideParentOverride.DeepCopyInto(&out.DockerfileSrcPluginOverrideParentOverride) - in.DockerfilePluginOverrideParentOverride.DeepCopyInto(&out.DockerfilePluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileImagePluginOverrideParentOverride. -func (in *DockerfileImagePluginOverrideParentOverride) DeepCopy() *DockerfileImagePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(DockerfileImagePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileParentOverride) DeepCopyInto(out *DockerfileParentOverride) { - *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.RootRequired != nil { - in, out := &in.RootRequired, &out.RootRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileParentOverride. -func (in *DockerfileParentOverride) DeepCopy() *DockerfileParentOverride { - if in == nil { - return nil - } - out := new(DockerfileParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerfilePluginOverride) DeepCopyInto(out *DockerfilePluginOverride) { - *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.RootRequired != nil { - in, out := &in.RootRequired, &out.RootRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfilePluginOverride. -func (in *DockerfilePluginOverride) DeepCopy() *DockerfilePluginOverride { - if in == nil { - return nil - } - out := new(DockerfilePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfilePluginOverrideParentOverride) DeepCopyInto(out *DockerfilePluginOverrideParentOverride) { - *out = *in - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.RootRequired != nil { - in, out := &in.RootRequired, &out.RootRequired - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfilePluginOverrideParentOverride. -func (in *DockerfilePluginOverrideParentOverride) DeepCopy() *DockerfilePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(DockerfilePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileSrc) DeepCopyInto(out *DockerfileSrc) { - *out = *in - if in.DevfileRegistry != nil { - in, out := &in.DevfileRegistry, &out.DevfileRegistry - *out = new(DockerfileDevfileRegistrySource) - **out = **in - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(DockerfileGitProjectSource) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrc. -func (in *DockerfileSrc) DeepCopy() *DockerfileSrc { - if in == nil { - return nil - } - out := new(DockerfileSrc) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileSrcParentOverride) DeepCopyInto(out *DockerfileSrcParentOverride) { - *out = *in - if in.DevfileRegistry != nil { - in, out := &in.DevfileRegistry, &out.DevfileRegistry - *out = new(DockerfileDevfileRegistrySourceParentOverride) - **out = **in - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(DockerfileGitProjectSourceParentOverride) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcParentOverride. -func (in *DockerfileSrcParentOverride) DeepCopy() *DockerfileSrcParentOverride { - if in == nil { - return nil - } - out := new(DockerfileSrcParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerfileSrcPluginOverride) DeepCopyInto(out *DockerfileSrcPluginOverride) { - *out = *in - if in.DevfileRegistry != nil { - in, out := &in.DevfileRegistry, &out.DevfileRegistry - *out = new(DockerfileDevfileRegistrySourcePluginOverride) - **out = **in - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(DockerfileGitProjectSourcePluginOverride) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcPluginOverride. -func (in *DockerfileSrcPluginOverride) DeepCopy() *DockerfileSrcPluginOverride { - if in == nil { - return nil - } - out := new(DockerfileSrcPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerfileSrcPluginOverrideParentOverride) DeepCopyInto(out *DockerfileSrcPluginOverrideParentOverride) { - *out = *in - if in.DevfileRegistry != nil { - in, out := &in.DevfileRegistry, &out.DevfileRegistry - *out = new(DockerfileDevfileRegistrySourcePluginOverrideParentOverride) - **out = **in - } - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(DockerfileGitProjectSourcePluginOverrideParentOverride) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerfileSrcPluginOverrideParentOverride. -func (in *DockerfileSrcPluginOverrideParentOverride) DeepCopy() *DockerfileSrcPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(DockerfileSrcPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoint) DeepCopyInto(out *Endpoint) { - *out = *in - if in.Secure != nil { - in, out := &in.Secure, &out.Secure - *out = new(bool) - **out = **in - } - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoint. -func (in *Endpoint) DeepCopy() *Endpoint { - if in == nil { - return nil - } - out := new(Endpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointParentOverride) DeepCopyInto(out *EndpointParentOverride) { - *out = *in - if in.Secure != nil { - in, out := &in.Secure, &out.Secure - *out = new(bool) - **out = **in - } - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointParentOverride. 
-func (in *EndpointParentOverride) DeepCopy() *EndpointParentOverride { - if in == nil { - return nil - } - out := new(EndpointParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPluginOverride) DeepCopyInto(out *EndpointPluginOverride) { - *out = *in - if in.Secure != nil { - in, out := &in.Secure, &out.Secure - *out = new(bool) - **out = **in - } - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverride. -func (in *EndpointPluginOverride) DeepCopy() *EndpointPluginOverride { - if in == nil { - return nil - } - out := new(EndpointPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPluginOverrideParentOverride) DeepCopyInto(out *EndpointPluginOverrideParentOverride) { - *out = *in - if in.Secure != nil { - in, out := &in.Secure, &out.Secure - *out = new(bool) - **out = **in - } - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPluginOverrideParentOverride. -func (in *EndpointPluginOverrideParentOverride) DeepCopy() *EndpointPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(EndpointPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVar) DeepCopyInto(out *EnvVar) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. -func (in *EnvVar) DeepCopy() *EnvVar { - if in == nil { - return nil - } - out := new(EnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarParentOverride) DeepCopyInto(out *EnvVarParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarParentOverride. -func (in *EnvVarParentOverride) DeepCopy() *EnvVarParentOverride { - if in == nil { - return nil - } - out := new(EnvVarParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarPluginOverride) DeepCopyInto(out *EnvVarPluginOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarPluginOverride. 
-func (in *EnvVarPluginOverride) DeepCopy() *EnvVarPluginOverride { - if in == nil { - return nil - } - out := new(EnvVarPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarPluginOverrideParentOverride) DeepCopyInto(out *EnvVarPluginOverrideParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarPluginOverrideParentOverride. -func (in *EnvVarPluginOverrideParentOverride) DeepCopy() *EnvVarPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(EnvVarPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Events) DeepCopyInto(out *Events) { - *out = *in - in.DevWorkspaceEvents.DeepCopyInto(&out.DevWorkspaceEvents) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Events. -func (in *Events) DeepCopy() *Events { - if in == nil { - return nil - } - out := new(Events) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCommand) DeepCopyInto(out *ExecCommand) { - *out = *in - in.LabeledCommand.DeepCopyInto(&out.LabeledCommand) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - copy(*out, *in) - } - if in.HotReloadCapable != nil { - in, out := &in.HotReloadCapable, &out.HotReloadCapable - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommand. -func (in *ExecCommand) DeepCopy() *ExecCommand { - if in == nil { - return nil - } - out := new(ExecCommand) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCommandParentOverride) DeepCopyInto(out *ExecCommandParentOverride) { - *out = *in - in.LabeledCommandParentOverride.DeepCopyInto(&out.LabeledCommandParentOverride) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVarParentOverride, len(*in)) - copy(*out, *in) - } - if in.HotReloadCapable != nil { - in, out := &in.HotReloadCapable, &out.HotReloadCapable - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandParentOverride. -func (in *ExecCommandParentOverride) DeepCopy() *ExecCommandParentOverride { - if in == nil { - return nil - } - out := new(ExecCommandParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCommandPluginOverride) DeepCopyInto(out *ExecCommandPluginOverride) { - *out = *in - in.LabeledCommandPluginOverride.DeepCopyInto(&out.LabeledCommandPluginOverride) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVarPluginOverride, len(*in)) - copy(*out, *in) - } - if in.HotReloadCapable != nil { - in, out := &in.HotReloadCapable, &out.HotReloadCapable - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverride. 
-func (in *ExecCommandPluginOverride) DeepCopy() *ExecCommandPluginOverride { - if in == nil { - return nil - } - out := new(ExecCommandPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecCommandPluginOverrideParentOverride) DeepCopyInto(out *ExecCommandPluginOverrideParentOverride) { - *out = *in - in.LabeledCommandPluginOverrideParentOverride.DeepCopyInto(&out.LabeledCommandPluginOverrideParentOverride) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVarPluginOverrideParentOverride, len(*in)) - copy(*out, *in) - } - if in.HotReloadCapable != nil { - in, out := &in.HotReloadCapable, &out.HotReloadCapable - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCommandPluginOverrideParentOverride. -func (in *ExecCommandPluginOverrideParentOverride) DeepCopy() *ExecCommandPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(ExecCommandPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitLikeProjectSource) DeepCopyInto(out *GitLikeProjectSource) { - *out = *in - out.CommonProjectSource = in.CommonProjectSource - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFrom) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSource. -func (in *GitLikeProjectSource) DeepCopy() *GitLikeProjectSource { - if in == nil { - return nil - } - out := new(GitLikeProjectSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitLikeProjectSourceParentOverride) DeepCopyInto(out *GitLikeProjectSourceParentOverride) { - *out = *in - out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFromParentOverride) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourceParentOverride. -func (in *GitLikeProjectSourceParentOverride) DeepCopy() *GitLikeProjectSourceParentOverride { - if in == nil { - return nil - } - out := new(GitLikeProjectSourceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GitLikeProjectSourcePluginOverride) DeepCopyInto(out *GitLikeProjectSourcePluginOverride) { - *out = *in - out.CommonProjectSourcePluginOverride = in.CommonProjectSourcePluginOverride - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFromPluginOverride) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourcePluginOverride. -func (in *GitLikeProjectSourcePluginOverride) DeepCopy() *GitLikeProjectSourcePluginOverride { - if in == nil { - return nil - } - out := new(GitLikeProjectSourcePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitLikeProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *GitLikeProjectSourcePluginOverrideParentOverride) { - *out = *in - out.CommonProjectSourcePluginOverrideParentOverride = in.CommonProjectSourcePluginOverrideParentOverride - if in.CheckoutFrom != nil { - in, out := &in.CheckoutFrom, &out.CheckoutFrom - *out = new(CheckoutFromPluginOverrideParentOverride) - **out = **in - } - if in.Remotes != nil { - in, out := &in.Remotes, &out.Remotes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLikeProjectSourcePluginOverrideParentOverride. -func (in *GitLikeProjectSourcePluginOverrideParentOverride) DeepCopy() *GitLikeProjectSourcePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(GitLikeProjectSourcePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitProjectSource) DeepCopyInto(out *GitProjectSource) { - *out = *in - in.GitLikeProjectSource.DeepCopyInto(&out.GitLikeProjectSource) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSource. -func (in *GitProjectSource) DeepCopy() *GitProjectSource { - if in == nil { - return nil - } - out := new(GitProjectSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitProjectSourceParentOverride) DeepCopyInto(out *GitProjectSourceParentOverride) { - *out = *in - in.GitLikeProjectSourceParentOverride.DeepCopyInto(&out.GitLikeProjectSourceParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourceParentOverride. -func (in *GitProjectSourceParentOverride) DeepCopy() *GitProjectSourceParentOverride { - if in == nil { - return nil - } - out := new(GitProjectSourceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *GitProjectSourcePluginOverride) DeepCopyInto(out *GitProjectSourcePluginOverride) { - *out = *in - in.GitLikeProjectSourcePluginOverride.DeepCopyInto(&out.GitLikeProjectSourcePluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourcePluginOverride. -func (in *GitProjectSourcePluginOverride) DeepCopy() *GitProjectSourcePluginOverride { - if in == nil { - return nil - } - out := new(GitProjectSourcePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitProjectSourcePluginOverrideParentOverride) DeepCopyInto(out *GitProjectSourcePluginOverrideParentOverride) { - *out = *in - in.GitLikeProjectSourcePluginOverrideParentOverride.DeepCopyInto(&out.GitLikeProjectSourcePluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitProjectSourcePluginOverrideParentOverride. -func (in *GitProjectSourcePluginOverrideParentOverride) DeepCopy() *GitProjectSourcePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(GitProjectSourcePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in - in.ImageUnion.DeepCopyInto(&out.ImageUnion) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageComponent) DeepCopyInto(out *ImageComponent) { - *out = *in - out.BaseComponent = in.BaseComponent - in.Image.DeepCopyInto(&out.Image) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponent. -func (in *ImageComponent) DeepCopy() *ImageComponent { - if in == nil { - return nil - } - out := new(ImageComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageComponentParentOverride) DeepCopyInto(out *ImageComponentParentOverride) { - *out = *in - out.BaseComponentParentOverride = in.BaseComponentParentOverride - in.ImageParentOverride.DeepCopyInto(&out.ImageParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentParentOverride. -func (in *ImageComponentParentOverride) DeepCopy() *ImageComponentParentOverride { - if in == nil { - return nil - } - out := new(ImageComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageComponentPluginOverride) DeepCopyInto(out *ImageComponentPluginOverride) { - *out = *in - out.BaseComponentPluginOverride = in.BaseComponentPluginOverride - in.ImagePluginOverride.DeepCopyInto(&out.ImagePluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentPluginOverride. 
-func (in *ImageComponentPluginOverride) DeepCopy() *ImageComponentPluginOverride { - if in == nil { - return nil - } - out := new(ImageComponentPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageComponentPluginOverrideParentOverride) DeepCopyInto(out *ImageComponentPluginOverrideParentOverride) { - *out = *in - out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride - in.ImagePluginOverrideParentOverride.DeepCopyInto(&out.ImagePluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageComponentPluginOverrideParentOverride. -func (in *ImageComponentPluginOverrideParentOverride) DeepCopy() *ImageComponentPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(ImageComponentPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageParentOverride) DeepCopyInto(out *ImageParentOverride) { - *out = *in - in.ImageUnionParentOverride.DeepCopyInto(&out.ImageUnionParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageParentOverride. -func (in *ImageParentOverride) DeepCopy() *ImageParentOverride { - if in == nil { - return nil - } - out := new(ImageParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImagePluginOverride) DeepCopyInto(out *ImagePluginOverride) { - *out = *in - in.ImageUnionPluginOverride.DeepCopyInto(&out.ImageUnionPluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePluginOverride. -func (in *ImagePluginOverride) DeepCopy() *ImagePluginOverride { - if in == nil { - return nil - } - out := new(ImagePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImagePluginOverrideParentOverride) DeepCopyInto(out *ImagePluginOverrideParentOverride) { - *out = *in - in.ImageUnionPluginOverrideParentOverride.DeepCopyInto(&out.ImageUnionPluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePluginOverrideParentOverride. -func (in *ImagePluginOverrideParentOverride) DeepCopy() *ImagePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(ImagePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageUnion) DeepCopyInto(out *ImageUnion) { - *out = *in - if in.Dockerfile != nil { - in, out := &in.Dockerfile, &out.Dockerfile - *out = new(DockerfileImage) - (*in).DeepCopyInto(*out) - } - if in.AutoBuild != nil { - in, out := &in.AutoBuild, &out.AutoBuild - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnion. 
-func (in *ImageUnion) DeepCopy() *ImageUnion { - if in == nil { - return nil - } - out := new(ImageUnion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageUnionParentOverride) DeepCopyInto(out *ImageUnionParentOverride) { - *out = *in - if in.Dockerfile != nil { - in, out := &in.Dockerfile, &out.Dockerfile - *out = new(DockerfileImageParentOverride) - (*in).DeepCopyInto(*out) - } - if in.AutoBuild != nil { - in, out := &in.AutoBuild, &out.AutoBuild - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionParentOverride. -func (in *ImageUnionParentOverride) DeepCopy() *ImageUnionParentOverride { - if in == nil { - return nil - } - out := new(ImageUnionParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageUnionPluginOverride) DeepCopyInto(out *ImageUnionPluginOverride) { - *out = *in - if in.Dockerfile != nil { - in, out := &in.Dockerfile, &out.Dockerfile - *out = new(DockerfileImagePluginOverride) - (*in).DeepCopyInto(*out) - } - if in.AutoBuild != nil { - in, out := &in.AutoBuild, &out.AutoBuild - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionPluginOverride. -func (in *ImageUnionPluginOverride) DeepCopy() *ImageUnionPluginOverride { - if in == nil { - return nil - } - out := new(ImageUnionPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageUnionPluginOverrideParentOverride) DeepCopyInto(out *ImageUnionPluginOverrideParentOverride) { - *out = *in - if in.Dockerfile != nil { - in, out := &in.Dockerfile, &out.Dockerfile - *out = new(DockerfileImagePluginOverrideParentOverride) - (*in).DeepCopyInto(*out) - } - if in.AutoBuild != nil { - in, out := &in.AutoBuild, &out.AutoBuild - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUnionPluginOverrideParentOverride. -func (in *ImageUnionPluginOverrideParentOverride) DeepCopy() *ImageUnionPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(ImageUnionPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImportReference) DeepCopyInto(out *ImportReference) { - *out = *in - in.ImportReferenceUnion.DeepCopyInto(&out.ImportReferenceUnion) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReference. -func (in *ImportReference) DeepCopy() *ImportReference { - if in == nil { - return nil - } - out := new(ImportReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImportReferenceParentOverride) DeepCopyInto(out *ImportReferenceParentOverride) { - *out = *in - in.ImportReferenceUnionParentOverride.DeepCopyInto(&out.ImportReferenceUnionParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceParentOverride. 
-func (in *ImportReferenceParentOverride) DeepCopy() *ImportReferenceParentOverride { - if in == nil { - return nil - } - out := new(ImportReferenceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImportReferenceUnion) DeepCopyInto(out *ImportReferenceUnion) { - *out = *in - if in.Kubernetes != nil { - in, out := &in.Kubernetes, &out.Kubernetes - *out = new(KubernetesCustomResourceImportReference) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceUnion. -func (in *ImportReferenceUnion) DeepCopy() *ImportReferenceUnion { - if in == nil { - return nil - } - out := new(ImportReferenceUnion) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImportReferenceUnionParentOverride) DeepCopyInto(out *ImportReferenceUnionParentOverride) { - *out = *in - if in.Kubernetes != nil { - in, out := &in.Kubernetes, &out.Kubernetes - *out = new(KubernetesCustomResourceImportReferenceParentOverride) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImportReferenceUnionParentOverride. -func (in *ImportReferenceUnionParentOverride) DeepCopy() *ImportReferenceUnionParentOverride { - if in == nil { - return nil - } - out := new(ImportReferenceUnionParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponent) DeepCopyInto(out *K8sLikeComponent) { - *out = *in - out.BaseComponent = in.BaseComponent - out.K8sLikeComponentLocation = in.K8sLikeComponentLocation - if in.DeployByDefault != nil { - in, out := &in.DeployByDefault, &out.DeployByDefault - *out = new(bool) - **out = **in - } - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]Endpoint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponent. -func (in *K8sLikeComponent) DeepCopy() *K8sLikeComponent { - if in == nil { - return nil - } - out := new(K8sLikeComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentLocation) DeepCopyInto(out *K8sLikeComponentLocation) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocation. -func (in *K8sLikeComponentLocation) DeepCopy() *K8sLikeComponentLocation { - if in == nil { - return nil - } - out := new(K8sLikeComponentLocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentLocationParentOverride) DeepCopyInto(out *K8sLikeComponentLocationParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationParentOverride. 
-func (in *K8sLikeComponentLocationParentOverride) DeepCopy() *K8sLikeComponentLocationParentOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentLocationParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentLocationPluginOverride) DeepCopyInto(out *K8sLikeComponentLocationPluginOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationPluginOverride. -func (in *K8sLikeComponentLocationPluginOverride) DeepCopy() *K8sLikeComponentLocationPluginOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentLocationPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentLocationPluginOverrideParentOverride) DeepCopyInto(out *K8sLikeComponentLocationPluginOverrideParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentLocationPluginOverrideParentOverride. -func (in *K8sLikeComponentLocationPluginOverrideParentOverride) DeepCopy() *K8sLikeComponentLocationPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentLocationPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentParentOverride) DeepCopyInto(out *K8sLikeComponentParentOverride) { - *out = *in - out.BaseComponentParentOverride = in.BaseComponentParentOverride - out.K8sLikeComponentLocationParentOverride = in.K8sLikeComponentLocationParentOverride - if in.DeployByDefault != nil { - in, out := &in.DeployByDefault, &out.DeployByDefault - *out = new(bool) - **out = **in - } - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]EndpointParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentParentOverride. -func (in *K8sLikeComponentParentOverride) DeepCopy() *K8sLikeComponentParentOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentPluginOverride) DeepCopyInto(out *K8sLikeComponentPluginOverride) { - *out = *in - out.BaseComponentPluginOverride = in.BaseComponentPluginOverride - out.K8sLikeComponentLocationPluginOverride = in.K8sLikeComponentLocationPluginOverride - if in.DeployByDefault != nil { - in, out := &in.DeployByDefault, &out.DeployByDefault - *out = new(bool) - **out = **in - } - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]EndpointPluginOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentPluginOverride. 
-func (in *K8sLikeComponentPluginOverride) DeepCopy() *K8sLikeComponentPluginOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *K8sLikeComponentPluginOverrideParentOverride) DeepCopyInto(out *K8sLikeComponentPluginOverrideParentOverride) { - *out = *in - out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride - out.K8sLikeComponentLocationPluginOverrideParentOverride = in.K8sLikeComponentLocationPluginOverrideParentOverride - if in.DeployByDefault != nil { - in, out := &in.DeployByDefault, &out.DeployByDefault - *out = new(bool) - **out = **in - } - if in.Endpoints != nil { - in, out := &in.Endpoints, &out.Endpoints - *out = make([]EndpointPluginOverrideParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new K8sLikeComponentPluginOverrideParentOverride. -func (in *K8sLikeComponentPluginOverrideParentOverride) DeepCopy() *K8sLikeComponentPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(K8sLikeComponentPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesComponent) DeepCopyInto(out *KubernetesComponent) { - *out = *in - in.K8sLikeComponent.DeepCopyInto(&out.K8sLikeComponent) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponent. -func (in *KubernetesComponent) DeepCopy() *KubernetesComponent { - if in == nil { - return nil - } - out := new(KubernetesComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesComponentParentOverride) DeepCopyInto(out *KubernetesComponentParentOverride) { - *out = *in - in.K8sLikeComponentParentOverride.DeepCopyInto(&out.K8sLikeComponentParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentParentOverride. -func (in *KubernetesComponentParentOverride) DeepCopy() *KubernetesComponentParentOverride { - if in == nil { - return nil - } - out := new(KubernetesComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesComponentPluginOverride) DeepCopyInto(out *KubernetesComponentPluginOverride) { - *out = *in - in.K8sLikeComponentPluginOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentPluginOverride. -func (in *KubernetesComponentPluginOverride) DeepCopy() *KubernetesComponentPluginOverride { - if in == nil { - return nil - } - out := new(KubernetesComponentPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubernetesComponentPluginOverrideParentOverride) DeepCopyInto(out *KubernetesComponentPluginOverrideParentOverride) { - *out = *in - in.K8sLikeComponentPluginOverrideParentOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesComponentPluginOverrideParentOverride. -func (in *KubernetesComponentPluginOverrideParentOverride) DeepCopy() *KubernetesComponentPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(KubernetesComponentPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesCustomResourceImportReference) DeepCopyInto(out *KubernetesCustomResourceImportReference) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesCustomResourceImportReference. -func (in *KubernetesCustomResourceImportReference) DeepCopy() *KubernetesCustomResourceImportReference { - if in == nil { - return nil - } - out := new(KubernetesCustomResourceImportReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubernetesCustomResourceImportReferenceParentOverride) DeepCopyInto(out *KubernetesCustomResourceImportReferenceParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesCustomResourceImportReferenceParentOverride. -func (in *KubernetesCustomResourceImportReferenceParentOverride) DeepCopy() *KubernetesCustomResourceImportReferenceParentOverride { - if in == nil { - return nil - } - out := new(KubernetesCustomResourceImportReferenceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabeledCommand) DeepCopyInto(out *LabeledCommand) { - *out = *in - in.BaseCommand.DeepCopyInto(&out.BaseCommand) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommand. -func (in *LabeledCommand) DeepCopy() *LabeledCommand { - if in == nil { - return nil - } - out := new(LabeledCommand) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabeledCommandParentOverride) DeepCopyInto(out *LabeledCommandParentOverride) { - *out = *in - in.BaseCommandParentOverride.DeepCopyInto(&out.BaseCommandParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandParentOverride. -func (in *LabeledCommandParentOverride) DeepCopy() *LabeledCommandParentOverride { - if in == nil { - return nil - } - out := new(LabeledCommandParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabeledCommandPluginOverride) DeepCopyInto(out *LabeledCommandPluginOverride) { - *out = *in - in.BaseCommandPluginOverride.DeepCopyInto(&out.BaseCommandPluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandPluginOverride. 
-func (in *LabeledCommandPluginOverride) DeepCopy() *LabeledCommandPluginOverride { - if in == nil { - return nil - } - out := new(LabeledCommandPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LabeledCommandPluginOverrideParentOverride) DeepCopyInto(out *LabeledCommandPluginOverrideParentOverride) { - *out = *in - in.BaseCommandPluginOverrideParentOverride.DeepCopyInto(&out.BaseCommandPluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabeledCommandPluginOverrideParentOverride. -func (in *LabeledCommandPluginOverrideParentOverride) DeepCopy() *LabeledCommandPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(LabeledCommandPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenshiftComponent) DeepCopyInto(out *OpenshiftComponent) { - *out = *in - in.K8sLikeComponent.DeepCopyInto(&out.K8sLikeComponent) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponent. -func (in *OpenshiftComponent) DeepCopy() *OpenshiftComponent { - if in == nil { - return nil - } - out := new(OpenshiftComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenshiftComponentParentOverride) DeepCopyInto(out *OpenshiftComponentParentOverride) { - *out = *in - in.K8sLikeComponentParentOverride.DeepCopyInto(&out.K8sLikeComponentParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentParentOverride. -func (in *OpenshiftComponentParentOverride) DeepCopy() *OpenshiftComponentParentOverride { - if in == nil { - return nil - } - out := new(OpenshiftComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenshiftComponentPluginOverride) DeepCopyInto(out *OpenshiftComponentPluginOverride) { - *out = *in - in.K8sLikeComponentPluginOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentPluginOverride. -func (in *OpenshiftComponentPluginOverride) DeepCopy() *OpenshiftComponentPluginOverride { - if in == nil { - return nil - } - out := new(OpenshiftComponentPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OpenshiftComponentPluginOverrideParentOverride) DeepCopyInto(out *OpenshiftComponentPluginOverrideParentOverride) { - *out = *in - in.K8sLikeComponentPluginOverrideParentOverride.DeepCopyInto(&out.K8sLikeComponentPluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenshiftComponentPluginOverrideParentOverride. 
-func (in *OpenshiftComponentPluginOverrideParentOverride) DeepCopy() *OpenshiftComponentPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(OpenshiftComponentPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OverrideDirective) DeepCopyInto(out *OverrideDirective) { - *out = *in - if in.DeleteFromPrimitiveList != nil { - in, out := &in.DeleteFromPrimitiveList, &out.DeleteFromPrimitiveList - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SetElementOrder != nil { - in, out := &in.SetElementOrder, &out.SetElementOrder - *out = make([]string, len(*in)) - copy(*out, *in) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverrideDirective. -func (in *OverrideDirective) DeepCopy() *OverrideDirective { - if in == nil { - return nil - } - out := new(OverrideDirective) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OverridesBase) DeepCopyInto(out *OverridesBase) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesBase. -func (in *OverridesBase) DeepCopy() *OverridesBase { - if in == nil { - return nil - } - out := new(OverridesBase) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *OverridesBaseParentOverride) DeepCopyInto(out *OverridesBaseParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OverridesBaseParentOverride. -func (in *OverridesBaseParentOverride) DeepCopy() *OverridesBaseParentOverride { - if in == nil { - return nil - } - out := new(OverridesBaseParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Parent) DeepCopyInto(out *Parent) { - *out = *in - in.ImportReference.DeepCopyInto(&out.ImportReference) - in.ParentOverrides.DeepCopyInto(&out.ParentOverrides) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parent. -func (in *Parent) DeepCopy() *Parent { - if in == nil { - return nil - } - out := new(Parent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ParentOverrides) DeepCopyInto(out *ParentOverrides) { - *out = *in - out.OverridesBase = in.OverridesBase - if in.Variables != nil { - in, out := &in.Variables, &out.Variables - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Components != nil { - in, out := &in.Components, &out.Components - *out = make([]ComponentParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Projects != nil { - in, out := &in.Projects, &out.Projects - *out = make([]ProjectParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StarterProjects != nil { - in, out := &in.StarterProjects, &out.StarterProjects - *out = make([]StarterProjectParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Commands != nil { - in, out := &in.Commands, &out.Commands - *out = make([]CommandParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentOverrides. -func (in *ParentOverrides) DeepCopy() *ParentOverrides { - if in == nil { - return nil - } - out := new(ParentOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginComponent) DeepCopyInto(out *PluginComponent) { - *out = *in - out.BaseComponent = in.BaseComponent - in.ImportReference.DeepCopyInto(&out.ImportReference) - in.PluginOverrides.DeepCopyInto(&out.PluginOverrides) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginComponent. -func (in *PluginComponent) DeepCopy() *PluginComponent { - if in == nil { - return nil - } - out := new(PluginComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginComponentParentOverride) DeepCopyInto(out *PluginComponentParentOverride) { - *out = *in - out.BaseComponentParentOverride = in.BaseComponentParentOverride - in.ImportReferenceParentOverride.DeepCopyInto(&out.ImportReferenceParentOverride) - in.PluginOverridesParentOverride.DeepCopyInto(&out.PluginOverridesParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginComponentParentOverride. -func (in *PluginComponentParentOverride) DeepCopy() *PluginComponentParentOverride { - if in == nil { - return nil - } - out := new(PluginComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PluginOverrides) DeepCopyInto(out *PluginOverrides) { - *out = *in - out.OverridesBase = in.OverridesBase - if in.Components != nil { - in, out := &in.Components, &out.Components - *out = make([]ComponentPluginOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Commands != nil { - in, out := &in.Commands, &out.Commands - *out = make([]CommandPluginOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginOverrides. -func (in *PluginOverrides) DeepCopy() *PluginOverrides { - if in == nil { - return nil - } - out := new(PluginOverrides) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginOverridesParentOverride) DeepCopyInto(out *PluginOverridesParentOverride) { - *out = *in - out.OverridesBaseParentOverride = in.OverridesBaseParentOverride - if in.Components != nil { - in, out := &in.Components, &out.Components - *out = make([]ComponentPluginOverrideParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Commands != nil { - in, out := &in.Commands, &out.Commands - *out = make([]CommandPluginOverrideParentOverride, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginOverridesParentOverride. -func (in *PluginOverridesParentOverride) DeepCopy() *PluginOverridesParentOverride { - if in == nil { - return nil - } - out := new(PluginOverridesParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Project) DeepCopyInto(out *Project) { - *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - in.ProjectSource.DeepCopyInto(&out.ProjectSource) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. -func (in *Project) DeepCopy() *Project { - if in == nil { - return nil - } - out := new(Project) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectParentOverride) DeepCopyInto(out *ProjectParentOverride) { - *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - in.ProjectSourceParentOverride.DeepCopyInto(&out.ProjectSourceParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectParentOverride. -func (in *ProjectParentOverride) DeepCopy() *ProjectParentOverride { - if in == nil { - return nil - } - out := new(ProjectParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
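All of the DeepCopy/DeepCopyInto pairs being removed here follow one mechanical pattern: a nil guard, a shallow value copy, then explicit reallocation of every pointer, map, and slice field so the copy shares no memory with the original. A minimal sketch of that pattern on a hypothetical type (Example is not part of the devfile API):

package main

import "fmt"

// Example is a hypothetical type used only to illustrate the generated
// deepcopy pattern seen in the removed code above.
type Example struct {
	Name    string
	Tags    []string
	Enabled *bool
}

// DeepCopyInto copies the receiver into out; out must be non-nil.
func (in *Example) DeepCopyInto(out *Example) {
	*out = *in // shallow copy of all value fields
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags) // reallocate the slice so the copies do not alias
	}
	if in.Enabled != nil {
		v := *in.Enabled
		out.Enabled = &v // reallocate the pointer target
	}
}

// DeepCopy allocates a new Example; a nil receiver yields nil.
func (in *Example) DeepCopy() *Example {
	if in == nil {
		return nil
	}
	out := new(Example)
	in.DeepCopyInto(out)
	return out
}

func main() {
	t := true
	a := Example{Name: "a", Tags: []string{"x"}, Enabled: &t}
	b := a.DeepCopy()
	b.Tags[0] = "y"
	*b.Enabled = false
	fmt.Println(a.Tags[0], *a.Enabled) // prints "x true": the copy is independent
}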
-func (in *ProjectSource) DeepCopyInto(out *ProjectSource) { - *out = *in - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(GitProjectSource) - (*in).DeepCopyInto(*out) - } - if in.Zip != nil { - in, out := &in.Zip, &out.Zip - *out = new(ZipProjectSource) - **out = **in - } - if in.Custom != nil { - in, out := &in.Custom, &out.Custom - *out = new(CustomProjectSource) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSource. -func (in *ProjectSource) DeepCopy() *ProjectSource { - if in == nil { - return nil - } - out := new(ProjectSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ProjectSourceParentOverride) DeepCopyInto(out *ProjectSourceParentOverride) { - *out = *in - if in.Git != nil { - in, out := &in.Git, &out.Git - *out = new(GitProjectSourceParentOverride) - (*in).DeepCopyInto(*out) - } - if in.Zip != nil { - in, out := &in.Zip, &out.Zip - *out = new(ZipProjectSourceParentOverride) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSourceParentOverride. -func (in *ProjectSourceParentOverride) DeepCopy() *ProjectSourceParentOverride { - if in == nil { - return nil - } - out := new(ProjectSourceParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StarterProject) DeepCopyInto(out *StarterProject) { - *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - in.ProjectSource.DeepCopyInto(&out.ProjectSource) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarterProject. -func (in *StarterProject) DeepCopy() *StarterProject { - if in == nil { - return nil - } - out := new(StarterProject) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StarterProjectParentOverride) DeepCopyInto(out *StarterProjectParentOverride) { - *out = *in - if in.Attributes != nil { - in, out := &in.Attributes, &out.Attributes - *out = make(attributes.Attributes, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - in.ProjectSourceParentOverride.DeepCopyInto(&out.ProjectSourceParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StarterProjectParentOverride. -func (in *StarterProjectParentOverride) DeepCopy() *StarterProjectParentOverride { - if in == nil { - return nil - } - out := new(StarterProjectParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - if in.Ephemeral != nil { - in, out := &in.Ephemeral, &out.Ephemeral - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. 
-func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeComponent) DeepCopyInto(out *VolumeComponent) { - *out = *in - out.BaseComponent = in.BaseComponent - in.Volume.DeepCopyInto(&out.Volume) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponent. -func (in *VolumeComponent) DeepCopy() *VolumeComponent { - if in == nil { - return nil - } - out := new(VolumeComponent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeComponentParentOverride) DeepCopyInto(out *VolumeComponentParentOverride) { - *out = *in - out.BaseComponentParentOverride = in.BaseComponentParentOverride - in.VolumeParentOverride.DeepCopyInto(&out.VolumeParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentParentOverride. -func (in *VolumeComponentParentOverride) DeepCopy() *VolumeComponentParentOverride { - if in == nil { - return nil - } - out := new(VolumeComponentParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeComponentPluginOverride) DeepCopyInto(out *VolumeComponentPluginOverride) { - *out = *in - out.BaseComponentPluginOverride = in.BaseComponentPluginOverride - in.VolumePluginOverride.DeepCopyInto(&out.VolumePluginOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverride. -func (in *VolumeComponentPluginOverride) DeepCopy() *VolumeComponentPluginOverride { - if in == nil { - return nil - } - out := new(VolumeComponentPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeComponentPluginOverrideParentOverride) DeepCopyInto(out *VolumeComponentPluginOverrideParentOverride) { - *out = *in - out.BaseComponentPluginOverrideParentOverride = in.BaseComponentPluginOverrideParentOverride - in.VolumePluginOverrideParentOverride.DeepCopyInto(&out.VolumePluginOverrideParentOverride) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeComponentPluginOverrideParentOverride. -func (in *VolumeComponentPluginOverrideParentOverride) DeepCopy() *VolumeComponentPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(VolumeComponentPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeMountParentOverride) DeepCopyInto(out *VolumeMountParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountParentOverride. -func (in *VolumeMountParentOverride) DeepCopy() *VolumeMountParentOverride { - if in == nil { - return nil - } - out := new(VolumeMountParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMountPluginOverride) DeepCopyInto(out *VolumeMountPluginOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountPluginOverride. -func (in *VolumeMountPluginOverride) DeepCopy() *VolumeMountPluginOverride { - if in == nil { - return nil - } - out := new(VolumeMountPluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMountPluginOverrideParentOverride) DeepCopyInto(out *VolumeMountPluginOverrideParentOverride) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMountPluginOverrideParentOverride. -func (in *VolumeMountPluginOverrideParentOverride) DeepCopy() *VolumeMountPluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(VolumeMountPluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeParentOverride) DeepCopyInto(out *VolumeParentOverride) { - *out = *in - if in.Ephemeral != nil { - in, out := &in.Ephemeral, &out.Ephemeral - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeParentOverride. -func (in *VolumeParentOverride) DeepCopy() *VolumeParentOverride { - if in == nil { - return nil - } - out := new(VolumeParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumePluginOverride) DeepCopyInto(out *VolumePluginOverride) { - *out = *in - if in.Ephemeral != nil { - in, out := &in.Ephemeral, &out.Ephemeral - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverride. -func (in *VolumePluginOverride) DeepCopy() *VolumePluginOverride { - if in == nil { - return nil - } - out := new(VolumePluginOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumePluginOverrideParentOverride) DeepCopyInto(out *VolumePluginOverrideParentOverride) { - *out = *in - if in.Ephemeral != nil { - in, out := &in.Ephemeral, &out.Ephemeral - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumePluginOverrideParentOverride. -func (in *VolumePluginOverrideParentOverride) DeepCopy() *VolumePluginOverrideParentOverride { - if in == nil { - return nil - } - out := new(VolumePluginOverrideParentOverride) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *ZipProjectSource) DeepCopyInto(out *ZipProjectSource) { - *out = *in - out.CommonProjectSource = in.CommonProjectSource -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipProjectSource. -func (in *ZipProjectSource) DeepCopy() *ZipProjectSource { - if in == nil { - return nil - } - out := new(ZipProjectSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ZipProjectSourceParentOverride) DeepCopyInto(out *ZipProjectSourceParentOverride) { - *out = *in - out.CommonProjectSourceParentOverride = in.CommonProjectSourceParentOverride -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ZipProjectSourceParentOverride. -func (in *ZipProjectSourceParentOverride) DeepCopy() *ZipProjectSourceParentOverride { - if in == nil { - return nil - } - out := new(ZipProjectSourceParentOverride) - in.DeepCopyInto(out) - return out -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go deleted file mode 100644 index e6a5a0ac744..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.getters.go +++ /dev/null @@ -1,53 +0,0 @@ -package v1alpha2 - -// GetIsDefault returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *CommandGroup) GetIsDefault() bool { - return getBoolOrDefault(in.IsDefault, false) -} - -// GetHotReloadCapable returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *ExecCommand) GetHotReloadCapable() bool { - return getBoolOrDefault(in.HotReloadCapable, false) -} - -// GetParallel returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *CompositeCommand) GetParallel() bool { - return getBoolOrDefault(in.Parallel, false) -} - -// GetDedicatedPod returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *Container) GetDedicatedPod() bool { - return getBoolOrDefault(in.DedicatedPod, false) -} - -// GetAutoBuild returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *ImageUnion) GetAutoBuild() bool { - return getBoolOrDefault(in.AutoBuild, false) -} - -// GetRootRequired returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *Dockerfile) GetRootRequired() bool { - return getBoolOrDefault(in.RootRequired, false) -} - -// GetDeployByDefault returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *K8sLikeComponent) GetDeployByDefault() bool { - return getBoolOrDefault(in.DeployByDefault, false) -} - -// GetEphemeral returns the value of the boolean property. If unset, it's the default value specified in the devfile:default:value marker -func (in *Volume) GetEphemeral() bool { - return getBoolOrDefault(in.Ephemeral, false) -} - -// GetSecure returns the value of the boolean property. 
If unset, it's the default value specified in the devfile:default:value marker -func (in *Endpoint) GetSecure() bool { - return getBoolOrDefault(in.Secure, false) -} - -func getBoolOrDefault(input *bool, defaultVal bool) bool { - if input != nil { - return *input - } - return defaultVal -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go deleted file mode 100644 index 27a85e013aa..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.keyed_definitions.go +++ /dev/null @@ -1,49 +0,0 @@ -package v1alpha2 - -func (keyed Component) Key() string { - return keyed.Name -} - -func (keyed Project) Key() string { - return keyed.Name -} - -func (keyed StarterProject) Key() string { - return keyed.Name -} - -func (keyed Command) Key() string { - return keyed.Id -} - -func (keyed ComponentParentOverride) Key() string { - return keyed.Name -} - -func (keyed ProjectParentOverride) Key() string { - return keyed.Name -} - -func (keyed StarterProjectParentOverride) Key() string { - return keyed.Name -} - -func (keyed CommandParentOverride) Key() string { - return keyed.Id -} - -func (keyed ComponentPluginOverrideParentOverride) Key() string { - return keyed.Name -} - -func (keyed CommandPluginOverrideParentOverride) Key() string { - return keyed.Id -} - -func (keyed ComponentPluginOverride) Key() string { - return keyed.Name -} - -func (keyed CommandPluginOverride) Key() string { - return keyed.Id -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go deleted file mode 100644 index af9775945b6..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.parent_overrides.go +++ /dev/null @@ -1,1492 +0,0 @@ -package v1alpha2 - -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" -) - -// +devfile:jsonschema:generate -type ParentOverrides struct { - OverridesBase `json:",inline"` - - // Overrides of variables encapsulated in a parent devfile. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchStrategy=merge - Variables map[string]string `json:"variables,omitempty" patchStrategy:"merge"` - - // Overrides of attributes encapsulated in a parent devfile. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchStrategy=merge - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty" patchStrategy:"merge"` - - // Overrides of components encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:toplevellist - Components []ComponentParentOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Overrides of projects encapsulated in a parent devfile. - // Overriding is done according to K8S strategic merge patch standard rules. 
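The two small helper files removed just above deserve a gloss: zz_generated.keyed_definitions.go exposes each list element's merge key (Name or Id) through a uniform Key() method, and zz_generated.getters.go wraps optional *bool fields so that unset values fall back to their devfile defaults. A minimal sketch of the getter pattern, using a hypothetical trimmed type rather than the real API struct:

package main

import "fmt"

// volume mirrors only the field needed here; it is not the real API type.
type volume struct {
	Ephemeral *bool
}

// getBoolOrDefault is the same three-line helper the removed getters file
// was built around: a nil *bool means "unset", so return the default.
func getBoolOrDefault(input *bool, defaultVal bool) bool {
	if input != nil {
		return *input
	}
	return defaultVal
}

func (in *volume) GetEphemeral() bool {
	return getBoolOrDefault(in.Ephemeral, false)
}

func main() {
	var v volume                  // Ephemeral left unset
	fmt.Println(v.GetEphemeral()) // prints "false", the devfile default

	t := true
	v.Ephemeral = &t
	fmt.Println(v.GetEphemeral()) // prints "true"
}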
- // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:toplevellist - Projects []ProjectParentOverride `json:"projects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Overrides of starterProjects encapsulated in a parent devfile. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:toplevellist - StarterProjects []StarterProjectParentOverride `json:"starterProjects,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Overrides of commands encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=id - // +patchStrategy=merge - // +devfile:toplevellist - Commands []CommandParentOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` -} - -//+k8s:openapi-gen=true -type ComponentParentOverride struct { - - // Mandatory name that allows referencing the component - // from other elements (such as commands) or from an external - // devfile that may reference this component through a parent or a plugin. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - ComponentUnionParentOverride `json:",inline"` -} - -type ProjectParentOverride struct { - - // Project name - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name. - // +optional - ClonePath string `json:"clonePath,omitempty"` - - ProjectSourceParentOverride `json:",inline"` -} - -type StarterProjectParentOverride struct { - - // Project name - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // Description of a starter project - // +optional - Description string `json:"description,omitempty"` - - // Sub-directory from a starter project to be used as root for starter project. - // +optional - SubDir string `json:"subDir,omitempty"` - - ProjectSourceParentOverride `json:",inline"` -} - -type CommandParentOverride struct { - - // Mandatory identifier that allows referencing - // this command in composite commands, from - // a parent, or in events. 
- // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Id string `json:"id"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - CommandUnionParentOverride `json:",inline"` -} - -// +union -type ComponentUnionParentOverride struct { - - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image;Plugin - // Type of component - // - // +unionDiscriminator - // +optional - ComponentType ComponentTypeParentOverride `json:"componentType,omitempty"` - - // Allows adding and configuring devworkspace-related containers - // +optional - Container *ContainerComponentParentOverride `json:"container,omitempty"` - - // Allows importing into the devworkspace the Kubernetes resources - // defined in a given manifest. For example this allows reusing the Kubernetes - // definitions used to deploy some runtime components in production. - // - // +optional - Kubernetes *KubernetesComponentParentOverride `json:"kubernetes,omitempty"` - - // Allows importing into the devworkspace the OpenShift resources - // defined in a given manifest. For example this allows reusing the OpenShift - // definitions used to deploy some runtime components in production. - // - // +optional - Openshift *OpenshiftComponentParentOverride `json:"openshift,omitempty"` - - // Allows specifying the definition of a volume - // shared by several other components - // +optional - Volume *VolumeComponentParentOverride `json:"volume,omitempty"` - - // Allows specifying the definition of an image for outer loop builds - // +optional - Image *ImageComponentParentOverride `json:"image,omitempty"` - - // Allows importing a plugin. - // - // Plugins are mainly imported devfiles that contribute components, commands - // and events as a consistent single unit. They are defined in either YAML files - // following the devfile syntax, - // or as `DevWorkspaceTemplate` Kubernetes Custom Resources - // +optional - // +devfile:overrides:include:omitInPlugin=true - Plugin *PluginComponentParentOverride `json:"plugin,omitempty"` -} - -// +union -type ProjectSourceParentOverride struct { - - // +kubebuilder:validation:Enum=Git;Zip - // Type of project source - // + - // +unionDiscriminator - // +optional - SourceType ProjectSourceTypeParentOverride `json:"sourceType,omitempty"` - - // Project's Git source - // +optional - Git *GitProjectSourceParentOverride `json:"git,omitempty"` - - // Project's Zip source - // +optional - Zip *ZipProjectSourceParentOverride `json:"zip,omitempty"` -} - -// +union -type CommandUnionParentOverride struct { - - // +kubebuilder:validation:Enum=Exec;Apply;Composite - // Type of devworkspace command - // +unionDiscriminator - // +optional - CommandType CommandTypeParentOverride `json:"commandType,omitempty"` - - // CLI Command executed in an existing component container - // +optional - Exec *ExecCommandParentOverride `json:"exec,omitempty"` - - // Command that consists in applying a given component definition, - // typically bound to a devworkspace event. - // - // For example, when an `apply` command is bound to a `preStart` event, - // and references a `container` component, it will start the container as a - // K8S initContainer in the devworkspace POD, unless the component has its - // `dedicatedPod` field set to `true`. 
- // - // When no `apply` command exist for a given component, - // it is assumed the component will be applied at devworkspace start - // by default, unless `deployByDefault` for that component is set to false. - // +optional - Apply *ApplyCommandParentOverride `json:"apply,omitempty"` - - // Composite command that allows executing several sub-commands - // either sequentially or concurrently - // +optional - Composite *CompositeCommandParentOverride `json:"composite,omitempty"` -} - -// ComponentType describes the type of component. -// Only one of the following component type may be specified. -type ComponentTypeParentOverride string - -// Component that allows the developer to add a configured container into their devworkspace -type ContainerComponentParentOverride struct { - BaseComponentParentOverride `json:",inline"` - ContainerParentOverride `json:",inline"` - Endpoints []EndpointParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Component that allows partly importing Kubernetes resources into the devworkspace POD -type KubernetesComponentParentOverride struct { - K8sLikeComponentParentOverride `json:",inline"` -} - -// Component that allows partly importing Openshift resources into the devworkspace POD -type OpenshiftComponentParentOverride struct { - K8sLikeComponentParentOverride `json:",inline"` -} - -// Component that allows the developer to declare and configure a volume into their devworkspace -type VolumeComponentParentOverride struct { - BaseComponentParentOverride `json:",inline"` - VolumeParentOverride `json:",inline"` -} - -// Component that allows the developer to build a runtime image for outerloop -type ImageComponentParentOverride struct { - BaseComponentParentOverride `json:",inline"` - ImageParentOverride `json:",inline"` -} - -type PluginComponentParentOverride struct { - BaseComponentParentOverride `json:",inline"` - ImportReferenceParentOverride `json:",inline"` - PluginOverridesParentOverride `json:",inline"` -} - -// ProjectSourceType describes the type of Project sources. -// Only one of the following project sources may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ProjectSourceTypeParentOverride string - -type GitProjectSourceParentOverride struct { - GitLikeProjectSourceParentOverride `json:",inline"` -} - -type ZipProjectSourceParentOverride struct { - CommonProjectSourceParentOverride `json:",inline"` - - // Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH - // +required - Location string `json:"location,omitempty"` -} - -// CommandType describes the type of command. -// Only one of the following command type may be specified. -type CommandTypeParentOverride string - -type ExecCommandParentOverride struct { - LabeledCommandParentOverride `json:",inline"` - - // +optional - // The actual command-line string - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. 
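The union types above all share one convention: a +unionDiscriminator enum field plus one optional pointer per variant, with every field marked omitempty. A trimmed, hypothetical mirror of that shape (these are not the generated types) shows how only the populated variant survives serialization:

package main

import (
	"encoding/json"
	"fmt"
)

type execCommand struct {
	CommandLine string `json:"commandLine,omitempty"`
	Component   string `json:"component,omitempty"`
}

type compositeCommand struct {
	Commands []string `json:"commands,omitempty"`
	Parallel *bool    `json:"parallel,omitempty"`
}

// commandUnion: one discriminator plus one pointer per variant.
type commandUnion struct {
	CommandType string            `json:"commandType,omitempty"`
	Exec        *execCommand      `json:"exec,omitempty"`
	Composite   *compositeCommand `json:"composite,omitempty"`
}

func main() {
	u := commandUnion{
		CommandType: "Exec",
		Exec:        &execCommand{CommandLine: "npm start", Component: "runtime"},
	}
	b, err := json.Marshal(u)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"commandType":"Exec","exec":{"commandLine":"npm start","component":"runtime"}}
}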
- CommandLine string `json:"commandLine,omitempty"` - - // +optional - // Describes component to which given action relates - // - Component string `json:"component,omitempty"` - - // Working directory where the command should be executed - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - // +optional - WorkingDir string `json:"workingDir,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Optional list of environment variables that have to be set - // before running the command - Env []EnvVarParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Whether the command is capable to reload itself when source code changes. - // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. - // - // Default value is `false` - HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` -} - -type ApplyCommandParentOverride struct { - LabeledCommandParentOverride `json:",inline"` - - // +optional - // Describes component that will be applied - // - Component string `json:"component,omitempty"` -} - -type CompositeCommandParentOverride struct { - LabeledCommandParentOverride `json:",inline"` - - // The commands that comprise this composite command - Commands []string `json:"commands,omitempty" patchStrategy:"replace"` - - // Indicates if the sub-commands should be executed concurrently - // +optional - Parallel *bool `json:"parallel,omitempty"` -} - -// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context -// to the devworkspace, in order to make working in it easier. -type BaseComponentParentOverride struct { -} - -type ContainerParentOverride struct { - // +optional - Image string `json:"image,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Environment variables used in this container. - // - // The following variables are reserved and cannot be overridden via env: - // - // - `$PROJECTS_ROOT` - // - // - `$PROJECT_SOURCE` - Env []EnvVarParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations that should be added to specific resources for this container - Annotation *AnnotationParentOverride `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // List of volumes mounts that should be mounted is this container. - VolumeMounts []VolumeMountParentOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - MemoryLimit string `json:"memoryLimit,omitempty"` - - // +optional - MemoryRequest string `json:"memoryRequest,omitempty"` - - // +optional - CpuLimit string `json:"cpuLimit,omitempty"` - - // +optional - CpuRequest string `json:"cpuRequest,omitempty"` - - // The command to run in the dockerimage component instead of the default one provided in the image. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Command []string `json:"command,omitempty" patchStrategy:"replace"` - - // The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Toggles whether or not the project source code should - // be mounted in the component. - // - // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. - // +optional - MountSources *bool `json:"mountSources,omitempty"` - - // Optional specification of the path in the container where - // project sources should be transferred/mounted when `mountSources` is `true`. - // When omitted, the default value of /projects is used. - // +optional - SourceMapping string `json:"sourceMapping,omitempty"` - - // Specify if a container should run in its own separated pod, - // instead of running as part of the main development environment pod. - // - // Default value is `false` - // +optional - DedicatedPod *bool `json:"dedicatedPod,omitempty"` -} - -type EndpointParentOverride struct { - - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // +optional - // Port number to be used within the container component. The same port cannot - // be used by two different container components. - TargetPort int `json:"targetPort,omitempty"` - - // Describes how the endpoint should be exposed on the network. - // - // - `public` means that the endpoint will be exposed on the public network, typically through - // a K8S ingress or an OpenShift route. - // - // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, - // typically by K8S services, to be consumed by other elements running - // on the same cloud internal network. - // - // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main devworkspace POD, on a local address. - // - // Default value is `public` - // +optional - Exposure EndpointExposureParentOverride `json:"exposure,omitempty"` - - // Describes the application and transport protocols of the traffic that will go through this endpoint. - // - // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. - // - // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. - // - // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. - // - // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. - // - // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. - // - // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. - // - // Default value is `http` - // +optional - Protocol EndpointProtocolParentOverride `json:"protocol,omitempty"` - - // Describes whether the endpoint should be secured and protected by some - // authentication process. This requires a protocol of `https` or `wss`. - // +optional - Secure *bool `json:"secure,omitempty"` - - // Path of the endpoint URL - // +optional - Path string `json:"path,omitempty"` - - // Map of implementation-dependant string-based free-form attributes. 
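The endpoint exposure/protocol documentation above encodes two defaulting rules: protocol falls back to `http`, and `http`/`ws` are promoted to their secure variants when `secure` is true. A small sketch of those documented rules (illustrative only; this is not the library's actual resolution code):

package main

import "fmt"

// effectiveProtocol applies the defaults described in the Endpoint docs.
func effectiveProtocol(protocol string, secure *bool) string {
	if protocol == "" {
		protocol = "http" // documented default
	}
	if secure != nil && *secure {
		switch protocol {
		case "http":
			return "https" // promoted when secure is set
		case "ws":
			return "wss"
		}
	}
	return protocol
}

func main() {
	t := true
	fmt.Println(effectiveProtocol("", nil))   // http
	fmt.Println(effectiveProtocol("", &t))    // https
	fmt.Println(effectiveProtocol("ws", &t))  // wss
	fmt.Println(effectiveProtocol("tcp", &t)) // tcp: no secure variant documented
}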
- // - // Examples of Che-specific attributes: - // - // - cookiesAuthEnabled: "true" / "false", - // - // - type: "terminal" / "ide" / "ide-dev", - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // +optional - // Annotations to be added to Kubernetes Ingress or Openshift Route - Annotations map[string]string `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -type K8sLikeComponentParentOverride struct { - BaseComponentParentOverride `json:",inline"` - K8sLikeComponentLocationParentOverride `json:",inline"` - - // Defines if the component should be deployed during startup. - // - // Default value is `false` - // +optional - DeployByDefault *bool `json:"deployByDefault,omitempty"` - - Endpoints []EndpointParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumeParentOverride struct { - - // +optional - // Size of the volume - Size string `json:"size,omitempty"` - - // +optional - // Ephemeral volumes are not stored persistently across restarts. Defaults - // to false - Ephemeral *bool `json:"ephemeral,omitempty"` -} - -type ImageParentOverride struct { - - // +optional - // Name of the image for the resulting outerloop build - ImageName string `json:"imageName,omitempty"` - ImageUnionParentOverride `json:",inline"` -} - -type ImportReferenceParentOverride struct { - ImportReferenceUnionParentOverride `json:",inline"` - - // Registry URL to pull the parent devfile from when using id in the parent reference. - // To ensure the parent devfile gets resolved consistently in different environments, - // it is recommended to always specify the `registryUrl` when `id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` - - // Specific stack/sample version to pull the parent devfile from, when using id in the parent reference. - // To specify `version`, `id` must be defined and used as the import reference source. - // `version` can be either a specific stack version, or `latest`. - // If no `version` specified, default version will be used. - // +optional - // +kubebuilder:validation:Pattern=^(latest)|(([1-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?)$ - Version string `json:"version,omitempty"` -} - -type PluginOverridesParentOverride struct { - OverridesBaseParentOverride `json:",inline"` - - // Overrides of components encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:toplevellist - Components []ComponentPluginOverrideParentOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Overrides of commands encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=id - // +patchStrategy=merge - // +devfile:toplevellist - Commands []CommandPluginOverrideParentOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` -} - -type GitLikeProjectSourceParentOverride struct { - CommonProjectSourceParentOverride `json:",inline"` - - // Defines from what the project should be checked out. 
Required if there are more than one remote configured - // +optional - CheckoutFrom *CheckoutFromParentOverride `json:"checkoutFrom,omitempty"` - - // +optional - // The remotes map which should be initialized in the git project. - // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. - Remotes map[string]string `json:"remotes,omitempty"` -} - -type CommonProjectSourceParentOverride struct { -} - -type LabeledCommandParentOverride struct { - BaseCommandParentOverride `json:",inline"` - - // +optional - // Optional label that provides a label for this command - // to be used in Editor UI menus for example - Label string `json:"label,omitempty"` -} - -type EnvVarParentOverride struct { - Name string `json:"name" yaml:"name"` - // +optional - Value string `json:"value,omitempty" yaml:"value"` -} - -// Annotation specifies the annotations to be added to specific resources -type AnnotationParentOverride struct { - - // +optional - // Annotations to be added to deployment - Deployment map[string]string `json:"deployment,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations to be added to service - Service map[string]string `json:"service,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumeMountParentOverride struct { - - // The volume mount name is the name of an existing `Volume` component. - // If several containers mount the same volume name - // then they will reuse the same volume and will be able to access to the same files. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // The path in the component container where the volume should be mounted. - // If not path is mentioned, default path is the is `/`. - // +optional - Path string `json:"path,omitempty"` -} - -// EndpointExposure describes the way an endpoint is exposed on the network. -// Only one of the following exposures may be specified: public, internal, none. -// +kubebuilder:validation:Enum=public;internal;none -type EndpointExposureParentOverride string - -// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. -// Only one of the following protocols may be specified: http, ws, tcp, udp. -// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp -type EndpointProtocolParentOverride string - -// +union -type K8sLikeComponentLocationParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Kubernetes-like location - // + - // +unionDiscriminator - // +optional - LocationType K8sLikeComponentLocationTypeParentOverride `json:"locationType,omitempty"` - - // Location in a file fetched from a uri. - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined manifest - // +optional - Inlined string `json:"inlined,omitempty"` -} - -// +union -type ImageUnionParentOverride struct { - - // +kubebuilder:validation:Enum=Dockerfile;AutoBuild - // Type of image - // - // +unionDiscriminator - // +optional - ImageType ImageTypeParentOverride `json:"imageType,omitempty"` - - // Allows specifying dockerfile type build - // +optional - Dockerfile *DockerfileImageParentOverride `json:"dockerfile,omitempty"` - - // Defines if the image should be built during startup. 
- // - // Default value is `false` - // +optional - AutoBuild *bool `json:"autoBuild,omitempty"` -} - -// Location from where the an import reference is retrieved -// +union -type ImportReferenceUnionParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;Id;Kubernetes - // type of location from where the referenced template structure should be retrieved - // + - // +unionDiscriminator - // +optional - ImportReferenceType ImportReferenceTypeParentOverride `json:"importReferenceType,omitempty"` - - // URI Reference of a parent devfile YAML file. - // It can be a full URL or a relative URI with the current devfile as the base URI. - // +optional - Uri string `json:"uri,omitempty"` - - // Id in a registry that contains a Devfile yaml file - // +optional - Id string `json:"id,omitempty"` - - // Reference to a Kubernetes CRD of type DevWorkspaceTemplate - // +optional - Kubernetes *KubernetesCustomResourceImportReferenceParentOverride `json:"kubernetes,omitempty"` -} - -// OverridesBase is used in the Overrides generator in order to provide a common base for the generated Overrides -// So please be careful when renaming -type OverridesBaseParentOverride struct{} - -//+k8s:openapi-gen=true -type ComponentPluginOverrideParentOverride struct { - - // Mandatory name that allows referencing the component - // from other elements (such as commands) or from an external - // devfile that may reference this component through a parent or a plugin. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - ComponentUnionPluginOverrideParentOverride `json:",inline"` -} - -type CommandPluginOverrideParentOverride struct { - - // Mandatory identifier that allows referencing - // this command in composite commands, from - // a parent, or in events. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Id string `json:"id"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - CommandUnionPluginOverrideParentOverride `json:",inline"` -} - -type CheckoutFromParentOverride struct { - - // The revision to checkout from. Should be branch name, tag or commit id. - // Default branch is used if missing or specified revision is not found. - // +optional - Revision string `json:"revision,omitempty"` - - // The remote name should be used as init. Required if there are more than one remote configured - // +optional - Remote string `json:"remote,omitempty"` -} - -type BaseCommandParentOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupParentOverride `json:"group,omitempty"` -} - -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type K8sLikeComponentLocationTypeParentOverride string - -// ImageType describes the type of image. -// Only one of the following image type may be specified. 
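The Version field a little earlier carries a kubebuilder validation pattern that is easy to misread. Reusing that pattern verbatim in Go shows its effect; in particular, the leading [1-9] group means 0.x stack versions do not validate:

package main

import (
	"fmt"
	"regexp"
)

// versionRe is the kubebuilder pattern from the Version field above, copied
// verbatim. Go's MatchString is unanchored, so the ^/$ inside matter.
var versionRe = regexp.MustCompile(`^(latest)|(([1-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?)$`)

func main() {
	for _, v := range []string{"latest", "2.2.0", "1.0.0-alpha.1", "0.1.0"} {
		fmt.Println(v, versionRe.MatchString(v))
	}
	// latest true, 2.2.0 true, 1.0.0-alpha.1 true, 0.1.0 false —
	// the major-version group is [1-9], so 0.x versions are rejected.
}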
-type ImageTypeParentOverride string - -// Dockerfile Image type to specify the outerloop build using a Dockerfile -type DockerfileImageParentOverride struct { - BaseImageParentOverride `json:",inline"` - DockerfileSrcParentOverride `json:",inline"` - DockerfileParentOverride `json:",inline"` -} - -// ImportReferenceType describes the type of location -// from where the referenced template structure should be retrieved. -// Only one of the following parent locations may be specified. -type ImportReferenceTypeParentOverride string - -type KubernetesCustomResourceImportReferenceParentOverride struct { - // +optional - Name string `json:"name,omitempty"` - - // +optional - Namespace string `json:"namespace,omitempty"` -} - -// +union -type ComponentUnionPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image - // Type of component - // - // +unionDiscriminator - // +optional - ComponentType ComponentTypePluginOverrideParentOverride `json:"componentType,omitempty"` - - // Allows adding and configuring devworkspace-related containers - // +optional - Container *ContainerComponentPluginOverrideParentOverride `json:"container,omitempty"` - - // Allows importing into the devworkspace the Kubernetes resources - // defined in a given manifest. For example this allows reusing the Kubernetes - // definitions used to deploy some runtime components in production. - // - // +optional - Kubernetes *KubernetesComponentPluginOverrideParentOverride `json:"kubernetes,omitempty"` - - // Allows importing into the devworkspace the OpenShift resources - // defined in a given manifest. For example this allows reusing the OpenShift - // definitions used to deploy some runtime components in production. - // - // +optional - Openshift *OpenshiftComponentPluginOverrideParentOverride `json:"openshift,omitempty"` - - // Allows specifying the definition of a volume - // shared by several other components - // +optional - Volume *VolumeComponentPluginOverrideParentOverride `json:"volume,omitempty"` - - // Allows specifying the definition of an image for outer loop builds - // +optional - Image *ImageComponentPluginOverrideParentOverride `json:"image,omitempty"` -} - -// +union -type CommandUnionPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Exec;Apply;Composite - // Type of devworkspace command - // +unionDiscriminator - // +optional - CommandType CommandTypePluginOverrideParentOverride `json:"commandType,omitempty"` - - // CLI Command executed in an existing component container - // +optional - Exec *ExecCommandPluginOverrideParentOverride `json:"exec,omitempty"` - - // Command that consists in applying a given component definition, - // typically bound to a devworkspace event. - // - // For example, when an `apply` command is bound to a `preStart` event, - // and references a `container` component, it will start the container as a - // K8S initContainer in the devworkspace POD, unless the component has its - // `dedicatedPod` field set to `true`. - // - // When no `apply` command exist for a given component, - // it is assumed the component will be applied at devworkspace start - // by default, unless `deployByDefault` for that component is set to false. 
- // +optional - Apply *ApplyCommandPluginOverrideParentOverride `json:"apply,omitempty"` - - // Composite command that allows executing several sub-commands - // either sequentially or concurrently - // +optional - Composite *CompositeCommandPluginOverrideParentOverride `json:"composite,omitempty"` -} - -type CommandGroupParentOverride struct { - - // +optional - // Kind of group the command is part of - Kind CommandGroupKindParentOverride `json:"kind,omitempty"` - - // +optional - // Identifies the default command for a given group kind - IsDefault *bool `json:"isDefault,omitempty"` -} - -type BaseImageParentOverride struct { -} - -// +union -type DockerfileSrcParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git - // Type of Dockerfile src - // + - // +unionDiscriminator - // +optional - SrcType DockerfileSrcTypeParentOverride `json:"srcType,omitempty"` - - // URI Reference of a Dockerfile. - // It can be a full URL or a relative URI from the current devfile as the base URI. - // +optional - Uri string `json:"uri,omitempty"` - - // Dockerfile's Devfile Registry source - // +optional - DevfileRegistry *DockerfileDevfileRegistrySourceParentOverride `json:"devfileRegistry,omitempty"` - - // Dockerfile's Git source - // +optional - Git *DockerfileGitProjectSourceParentOverride `json:"git,omitempty"` -} - -type DockerfileParentOverride struct { - - // Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container - // +optional - BuildContext string `json:"buildContext,omitempty"` - - // The arguments to supply to the dockerfile build. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Specify if a privileged builder pod is required. - // - // Default value is `false` - // +optional - RootRequired *bool `json:"rootRequired,omitempty"` -} - -// ComponentType describes the type of component. -// Only one of the following component type may be specified. -type ComponentTypePluginOverrideParentOverride string - -// Component that allows the developer to add a configured container into their devworkspace -type ContainerComponentPluginOverrideParentOverride struct { - BaseComponentPluginOverrideParentOverride `json:",inline"` - ContainerPluginOverrideParentOverride `json:",inline"` - Endpoints []EndpointPluginOverrideParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Component that allows partly importing Kubernetes resources into the devworkspace POD -type KubernetesComponentPluginOverrideParentOverride struct { - K8sLikeComponentPluginOverrideParentOverride `json:",inline"` -} - -// Component that allows partly importing Openshift resources into the devworkspace POD -type OpenshiftComponentPluginOverrideParentOverride struct { - K8sLikeComponentPluginOverrideParentOverride `json:",inline"` -} - -// Component that allows the developer to declare and configure a volume into their devworkspace -type VolumeComponentPluginOverrideParentOverride struct { - BaseComponentPluginOverrideParentOverride `json:",inline"` - VolumePluginOverrideParentOverride `json:",inline"` -} - -// Component that allows the developer to build a runtime image for outerloop -type ImageComponentPluginOverrideParentOverride struct { - BaseComponentPluginOverrideParentOverride `json:",inline"` - ImagePluginOverrideParentOverride `json:",inline"` -} - -// CommandType describes the type of command. -// Only one of the following command type may be specified. 
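CommandGroupParentOverride above pairs a group kind (build, run, test, debug, deploy) with an optional isDefault flag. A hypothetical sketch of how such a flag is typically consumed when picking the command to execute for a kind — the trimmed types and the defaultFor helper are illustrative, not part of the API:

package main

import "fmt"

type commandGroup struct {
	Kind      string // build, run, test, debug, deploy
	IsDefault *bool
}

type command struct {
	ID    string
	Group *commandGroup
}

// defaultFor returns the command flagged as the default for a group kind.
func defaultFor(kind string, cmds []command) (string, bool) {
	for _, c := range cmds {
		if c.Group != nil && c.Group.Kind == kind &&
			c.Group.IsDefault != nil && *c.Group.IsDefault {
			return c.ID, true
		}
	}
	return "", false
}

func main() {
	t := true
	cmds := []command{
		{ID: "build-image", Group: &commandGroup{Kind: "build"}},
		{ID: "run-dev", Group: &commandGroup{Kind: "run", IsDefault: &t}},
	}
	fmt.Println(defaultFor("run", cmds))    // run-dev true
	fmt.Println(defaultFor("deploy", cmds)) // "" false
}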
-type CommandTypePluginOverrideParentOverride string - -type ExecCommandPluginOverrideParentOverride struct { - LabeledCommandPluginOverrideParentOverride `json:",inline"` - - // +optional - // The actual command-line string - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - CommandLine string `json:"commandLine,omitempty"` - - // +optional - // Describes component to which given action relates - // - Component string `json:"component,omitempty"` - - // Working directory where the command should be executed - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - // +optional - WorkingDir string `json:"workingDir,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Optional list of environment variables that have to be set - // before running the command - Env []EnvVarPluginOverrideParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Whether the command is capable to reload itself when source code changes. - // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. - // - // Default value is `false` - HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` -} - -type ApplyCommandPluginOverrideParentOverride struct { - LabeledCommandPluginOverrideParentOverride `json:",inline"` - - // +optional - // Describes component that will be applied - // - Component string `json:"component,omitempty"` -} - -type CompositeCommandPluginOverrideParentOverride struct { - LabeledCommandPluginOverrideParentOverride `json:",inline"` - - // The commands that comprise this composite command - Commands []string `json:"commands,omitempty" patchStrategy:"replace"` - - // Indicates if the sub-commands should be executed concurrently - // +optional - Parallel *bool `json:"parallel,omitempty"` -} - -// CommandGroupKind describes the kind of command group. -// +kubebuilder:validation:Enum=build;run;test;debug;deploy -type CommandGroupKindParentOverride string - -// DockerfileSrcType describes the type of -// the src for the Dockerfile outerloop build. -// Only one of the following location type may be specified. -type DockerfileSrcTypeParentOverride string - -type DockerfileDevfileRegistrySourceParentOverride struct { - - // +optional - // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry - // required for the Dockerfile build will be downloaded for building the image. - Id string `json:"id,omitempty"` - - // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. - // To ensure the Dockerfile gets resolved consistently in different environments, - // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` -} - -type DockerfileGitProjectSourceParentOverride struct { - - // Git src for the Dockerfile build. 
The src required for the Dockerfile build will need to be - // cloned for building the image. - GitProjectSourceParentOverride `json:",inline"` - - // Location of the Dockerfile in the Git repository when using git as Dockerfile src. - // Defaults to Dockerfile. - // +optional - FileLocation string `json:"fileLocation,omitempty"` -} - -// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context -// to the devworkspace, in order to make working in it easier. -type BaseComponentPluginOverrideParentOverride struct { -} - -type ContainerPluginOverrideParentOverride struct { - - // +optional - Image string `json:"image,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Environment variables used in this container. - // - // The following variables are reserved and cannot be overridden via env: - // - // - `$PROJECTS_ROOT` - // - // - `$PROJECT_SOURCE` - Env []EnvVarPluginOverrideParentOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations that should be added to specific resources for this container - Annotation *AnnotationPluginOverrideParentOverride `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // List of volumes mounts that should be mounted is this container. - VolumeMounts []VolumeMountPluginOverrideParentOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - MemoryLimit string `json:"memoryLimit,omitempty"` - - // +optional - MemoryRequest string `json:"memoryRequest,omitempty"` - - // +optional - CpuLimit string `json:"cpuLimit,omitempty"` - - // +optional - CpuRequest string `json:"cpuRequest,omitempty"` - - // The command to run in the dockerimage component instead of the default one provided in the image. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Command []string `json:"command,omitempty" patchStrategy:"replace"` - - // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Toggles whether or not the project source code should - // be mounted in the component. - // - // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. - // +optional - MountSources *bool `json:"mountSources,omitempty"` - - // Optional specification of the path in the container where - // project sources should be transferred/mounted when `mountSources` is `true`. - // When omitted, the default value of /projects is used. - // +optional - SourceMapping string `json:"sourceMapping,omitempty"` - - // Specify if a container should run in its own separated pod, - // instead of running as part of the main development environment pod. - // - // Default value is `false` - // +optional - DedicatedPod *bool `json:"dedicatedPod,omitempty"` -} - -type EndpointPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // +optional - // Port number to be used within the container component. The same port cannot - // be used by two different container components. 
- TargetPort int `json:"targetPort,omitempty"` - - // Describes how the endpoint should be exposed on the network. - // - // - `public` means that the endpoint will be exposed on the public network, typically through - // a K8S ingress or an OpenShift route. - // - // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, - // typically by K8S services, to be consumed by other elements running - // on the same cloud internal network. - // - // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main devworkspace POD, on a local address. - // - // Default value is `public` - // +optional - Exposure EndpointExposurePluginOverrideParentOverride `json:"exposure,omitempty"` - - // Describes the application and transport protocols of the traffic that will go through this endpoint. - // - // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. - // - // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. - // - // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. - // - // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. - // - // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. - // - // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. - // - // Default value is `http` - // +optional - Protocol EndpointProtocolPluginOverrideParentOverride `json:"protocol,omitempty"` - - // Describes whether the endpoint should be secured and protected by some - // authentication process. This requires a protocol of `https` or `wss`. - // +optional - Secure *bool `json:"secure,omitempty"` - - // Path of the endpoint URL - // +optional - Path string `json:"path,omitempty"` - - // Map of implementation-dependant string-based free-form attributes. - // - // Examples of Che-specific attributes: - // - // - cookiesAuthEnabled: "true" / "false", - // - // - type: "terminal" / "ide" / "ide-dev", - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // +optional - // Annotations to be added to Kubernetes Ingress or Openshift Route - Annotations map[string]string `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -type K8sLikeComponentPluginOverrideParentOverride struct { - BaseComponentPluginOverrideParentOverride `json:",inline"` - K8sLikeComponentLocationPluginOverrideParentOverride `json:",inline"` - - // Defines if the component should be deployed during startup. - // - // Default value is `false` - // +optional - DeployByDefault *bool `json:"deployByDefault,omitempty"` - - Endpoints []EndpointPluginOverrideParentOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumePluginOverrideParentOverride struct { - - // +optional - // Size of the volume - Size string `json:"size,omitempty"` - - // +optional - // Ephemeral volumes are not stored persistently across restarts. 
Defaults - // to false - Ephemeral *bool `json:"ephemeral,omitempty"` -} - -type ImagePluginOverrideParentOverride struct { - - // +optional - // Name of the image for the resulting outerloop build - ImageName string `json:"imageName,omitempty"` - ImageUnionPluginOverrideParentOverride `json:",inline"` -} - -type LabeledCommandPluginOverrideParentOverride struct { - BaseCommandPluginOverrideParentOverride `json:",inline"` - - // +optional - // Optional label that provides a label for this command - // to be used in Editor UI menus for example - Label string `json:"label,omitempty"` -} - -type EnvVarPluginOverrideParentOverride struct { - Name string `json:"name" yaml:"name"` - - // +optional - Value string `json:"value,omitempty" yaml:"value"` -} - -// Annotation specifies the annotations to be added to specific resources -type AnnotationPluginOverrideParentOverride struct { - - // +optional - // Annotations to be added to deployment - Deployment map[string]string `json:"deployment,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations to be added to service - Service map[string]string `json:"service,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumeMountPluginOverrideParentOverride struct { - - // The volume mount name is the name of an existing `Volume` component. - // If several containers mount the same volume name - // then they will reuse the same volume and will be able to access to the same files. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // The path in the component container where the volume should be mounted. - // If not path is mentioned, default path is the is `/`. - // +optional - Path string `json:"path,omitempty"` -} - -// EndpointExposure describes the way an endpoint is exposed on the network. -// Only one of the following exposures may be specified: public, internal, none. -// +kubebuilder:validation:Enum=public;internal;none -type EndpointExposurePluginOverrideParentOverride string - -// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. -// Only one of the following protocols may be specified: http, ws, tcp, udp. -// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp -type EndpointProtocolPluginOverrideParentOverride string - -// +union -type K8sLikeComponentLocationPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Kubernetes-like location - // + - // +unionDiscriminator - // +optional - LocationType K8sLikeComponentLocationTypePluginOverrideParentOverride `json:"locationType,omitempty"` - - // Location in a file fetched from a uri. - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined manifest - // +optional - Inlined string `json:"inlined,omitempty"` -} - -// +union -type ImageUnionPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Dockerfile;AutoBuild - // Type of image - // - // +unionDiscriminator - // +optional - ImageType ImageTypePluginOverrideParentOverride `json:"imageType,omitempty"` - - // Allows specifying dockerfile type build - // +optional - Dockerfile *DockerfileImagePluginOverrideParentOverride `json:"dockerfile,omitempty"` - - // Defines if the image should be built during startup. 
- // - // Default value is `false` - // +optional - AutoBuild *bool `json:"autoBuild,omitempty"` -} - -type BaseCommandPluginOverrideParentOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupPluginOverrideParentOverride `json:"group,omitempty"` -} - -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type K8sLikeComponentLocationTypePluginOverrideParentOverride string - -// ImageType describes the type of image. -// Only one of the following image type may be specified. -type ImageTypePluginOverrideParentOverride string - -// Dockerfile Image type to specify the outerloop build using a Dockerfile -type DockerfileImagePluginOverrideParentOverride struct { - BaseImagePluginOverrideParentOverride `json:",inline"` - DockerfileSrcPluginOverrideParentOverride `json:",inline"` - DockerfilePluginOverrideParentOverride `json:",inline"` -} - -type CommandGroupPluginOverrideParentOverride struct { - - // +optional - // Kind of group the command is part of - Kind CommandGroupKindPluginOverrideParentOverride `json:"kind,omitempty"` - - // +optional - // Identifies the default command for a given group kind - IsDefault *bool `json:"isDefault,omitempty"` -} - -type BaseImagePluginOverrideParentOverride struct { -} - -// +union -type DockerfileSrcPluginOverrideParentOverride struct { - - // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git - // Type of Dockerfile src - // + - // +unionDiscriminator - // +optional - SrcType DockerfileSrcTypePluginOverrideParentOverride `json:"srcType,omitempty"` - - // URI Reference of a Dockerfile. - // It can be a full URL or a relative URI from the current devfile as the base URI. - // +optional - Uri string `json:"uri,omitempty"` - - // Dockerfile's Devfile Registry source - // +optional - DevfileRegistry *DockerfileDevfileRegistrySourcePluginOverrideParentOverride `json:"devfileRegistry,omitempty"` - - // Dockerfile's Git source - // +optional - Git *DockerfileGitProjectSourcePluginOverrideParentOverride `json:"git,omitempty"` -} - -type DockerfilePluginOverrideParentOverride struct { - - // Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container - // +optional - BuildContext string `json:"buildContext,omitempty"` - - // The arguments to supply to the dockerfile build. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Specify if a privileged builder pod is required. - // - // Default value is `false` - // +optional - RootRequired *bool `json:"rootRequired,omitempty"` -} - -// CommandGroupKind describes the kind of command group. -// +kubebuilder:validation:Enum=build;run;test;debug;deploy -type CommandGroupKindPluginOverrideParentOverride string - -// DockerfileSrcType describes the type of -// the src for the Dockerfile outerloop build. -// Only one of the following location type may be specified. -type DockerfileSrcTypePluginOverrideParentOverride string - -type DockerfileDevfileRegistrySourcePluginOverrideParentOverride struct { - - // +optional - // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry - // required for the Dockerfile build will be downloaded for building the image. - Id string `json:"id,omitempty"` - - // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. 
- // To ensure the Dockerfile gets resolved consistently in different environments, - // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` -} - -type DockerfileGitProjectSourcePluginOverrideParentOverride struct { - - // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be - // cloned for building the image. - GitProjectSourcePluginOverrideParentOverride `json:",inline"` - - // Location of the Dockerfile in the Git repository when using git as Dockerfile src. - // Defaults to Dockerfile. - // +optional - FileLocation string `json:"fileLocation,omitempty"` -} - -type GitProjectSourcePluginOverrideParentOverride struct { - GitLikeProjectSourcePluginOverrideParentOverride `json:",inline"` -} - -type GitLikeProjectSourcePluginOverrideParentOverride struct { - CommonProjectSourcePluginOverrideParentOverride `json:",inline"` - - // Defines from what the project should be checked out. Required if there are more than one remote configured - // +optional - CheckoutFrom *CheckoutFromPluginOverrideParentOverride `json:"checkoutFrom,omitempty"` - - // +optional - // The remotes map which should be initialized in the git project. - // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. - Remotes map[string]string `json:"remotes,omitempty"` -} - -type CommonProjectSourcePluginOverrideParentOverride struct { -} - -type CheckoutFromPluginOverrideParentOverride struct { - - // The revision to checkout from. Should be branch name, tag or commit id. - // Default branch is used if missing or specified revision is not found. - // +optional - Revision string `json:"revision,omitempty"` - - // The remote name should be used as init. Required if there are more than one remote configured - // +optional - Remote string `json:"remote,omitempty"` -} - -func (overrides ParentOverrides) isOverride() {} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go deleted file mode 100644 index 65056ab920f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.plugin_overrides.go +++ /dev/null @@ -1,656 +0,0 @@ -package v1alpha2 - -import ( - attributes "github.com/devfile/api/v2/pkg/attributes" -) - -// +devfile:jsonschema:generate -type PluginOverrides struct { - OverridesBase `json:",inline"` - - // Overrides of components encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // +devfile:toplevellist - Components []ComponentPluginOverride `json:"components,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // Overrides of commands encapsulated in a parent devfile or a plugin. - // Overriding is done according to K8S strategic merge patch standard rules. 
- // +optional - // +patchMergeKey=id - // +patchStrategy=merge - // +devfile:toplevellist - Commands []CommandPluginOverride `json:"commands,omitempty" patchStrategy:"merge" patchMergeKey:"id"` -} - -//+k8s:openapi-gen=true -type ComponentPluginOverride struct { - - // Mandatory name that allows referencing the component - // from other elements (such as commands) or from an external - // devfile that may reference this component through a parent or a plugin. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - ComponentUnionPluginOverride `json:",inline"` -} - -type CommandPluginOverride struct { - - // Mandatory identifier that allows referencing - // this command in composite commands, from - // a parent, or in events. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Id string `json:"id"` - - // Map of implementation-dependant free-form YAML attributes. - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - CommandUnionPluginOverride `json:",inline"` -} - -// +union -type ComponentUnionPluginOverride struct { - - // +kubebuilder:validation:Enum=Container;Kubernetes;Openshift;Volume;Image - // Type of component - // - // +unionDiscriminator - // +optional - ComponentType ComponentTypePluginOverride `json:"componentType,omitempty"` - - // Allows adding and configuring devworkspace-related containers - // +optional - Container *ContainerComponentPluginOverride `json:"container,omitempty"` - - // Allows importing into the devworkspace the Kubernetes resources - // defined in a given manifest. For example this allows reusing the Kubernetes - // definitions used to deploy some runtime components in production. - // - // +optional - Kubernetes *KubernetesComponentPluginOverride `json:"kubernetes,omitempty"` - - // Allows importing into the devworkspace the OpenShift resources - // defined in a given manifest. For example this allows reusing the OpenShift - // definitions used to deploy some runtime components in production. - // - // +optional - Openshift *OpenshiftComponentPluginOverride `json:"openshift,omitempty"` - - // Allows specifying the definition of a volume - // shared by several other components - // +optional - Volume *VolumeComponentPluginOverride `json:"volume,omitempty"` - - // Allows specifying the definition of an image for outer loop builds - // +optional - Image *ImageComponentPluginOverride `json:"image,omitempty"` -} - -// +union -type CommandUnionPluginOverride struct { - - // +kubebuilder:validation:Enum=Exec;Apply;Composite - // Type of devworkspace command - // +unionDiscriminator - // +optional - CommandType CommandTypePluginOverride `json:"commandType,omitempty"` - - // CLI Command executed in an existing component container - // +optional - Exec *ExecCommandPluginOverride `json:"exec,omitempty"` - - // Command that consists in applying a given component definition, - // typically bound to a devworkspace event. 
- // - // For example, when an `apply` command is bound to a `preStart` event, - // and references a `container` component, it will start the container as a - // K8S initContainer in the devworkspace POD, unless the component has its - // `dedicatedPod` field set to `true`. - // - // When no `apply` command exist for a given component, - // it is assumed the component will be applied at devworkspace start - // by default, unless `deployByDefault` for that component is set to false. - // +optional - Apply *ApplyCommandPluginOverride `json:"apply,omitempty"` - - // Composite command that allows executing several sub-commands - // either sequentially or concurrently - // +optional - Composite *CompositeCommandPluginOverride `json:"composite,omitempty"` -} - -// ComponentType describes the type of component. -// Only one of the following component type may be specified. -type ComponentTypePluginOverride string - -// Component that allows the developer to add a configured container into their devworkspace -type ContainerComponentPluginOverride struct { - BaseComponentPluginOverride `json:",inline"` - ContainerPluginOverride `json:",inline"` - Endpoints []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Component that allows partly importing Kubernetes resources into the devworkspace POD -type KubernetesComponentPluginOverride struct { - K8sLikeComponentPluginOverride `json:",inline"` -} - -// Component that allows partly importing Openshift resources into the devworkspace POD -type OpenshiftComponentPluginOverride struct { - K8sLikeComponentPluginOverride `json:",inline"` -} - -// Component that allows the developer to declare and configure a volume into their devworkspace -type VolumeComponentPluginOverride struct { - BaseComponentPluginOverride `json:",inline"` - VolumePluginOverride `json:",inline"` -} - -// Component that allows the developer to build a runtime image for outerloop -type ImageComponentPluginOverride struct { - BaseComponentPluginOverride `json:",inline"` - ImagePluginOverride `json:",inline"` -} - -// CommandType describes the type of command. -// Only one of the following command type may be specified. -type CommandTypePluginOverride string - -type ExecCommandPluginOverride struct { - LabeledCommandPluginOverride `json:",inline"` - - // +optional - // The actual command-line string - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. - CommandLine string `json:"commandLine,omitempty"` - - // +optional - // Describes component to which given action relates - // - Component string `json:"component,omitempty"` - - // Working directory where the command should be executed - // - // Special variables that can be used: - // - // - `$PROJECTS_ROOT`: A path where projects sources are mounted as defined by container component's sourceMapping. - // - // - `$PROJECT_SOURCE`: A path to a project source ($PROJECTS_ROOT/). If there are multiple projects, this will point to the directory of the first one. 
- // +optional - WorkingDir string `json:"workingDir,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Optional list of environment variables that have to be set - // before running the command - Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Whether the command is capable to reload itself when source code changes. - // If set to `true` the command won't be restarted and it is expected to handle file changes on its own. - // - // Default value is `false` - HotReloadCapable *bool `json:"hotReloadCapable,omitempty"` -} - -type ApplyCommandPluginOverride struct { - LabeledCommandPluginOverride `json:",inline"` - - // +optional - // Describes component that will be applied - // - Component string `json:"component,omitempty"` -} - -type CompositeCommandPluginOverride struct { - LabeledCommandPluginOverride `json:",inline"` - - // The commands that comprise this composite command - Commands []string `json:"commands,omitempty" patchStrategy:"replace"` - - // Indicates if the sub-commands should be executed concurrently - // +optional - Parallel *bool `json:"parallel,omitempty"` -} - -// DevWorkspace component: Anything that will bring additional features / tooling / behaviour / context -// to the devworkspace, in order to make working in it easier. -type BaseComponentPluginOverride struct { -} - -type ContainerPluginOverride struct { - // +optional - Image string `json:"image,omitempty"` - - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - // Environment variables used in this container. - // - // The following variables are reserved and cannot be overridden via env: - // - // - `$PROJECTS_ROOT` - // - // - `$PROJECT_SOURCE` - Env []EnvVarPluginOverride `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations that should be added to specific resources for this container - Annotation *AnnotationPluginOverride `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // List of volumes mounts that should be mounted is this container. - VolumeMounts []VolumeMountPluginOverride `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - MemoryLimit string `json:"memoryLimit,omitempty"` - - // +optional - MemoryRequest string `json:"memoryRequest,omitempty"` - - // +optional - CpuLimit string `json:"cpuLimit,omitempty"` - - // +optional - CpuRequest string `json:"cpuRequest,omitempty"` - - // The command to run in the dockerimage component instead of the default one provided in the image. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Command []string `json:"command,omitempty" patchStrategy:"replace"` - - // The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command. - // - // Defaults to an empty array, meaning use whatever is defined in the image. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Toggles whether or not the project source code should - // be mounted in the component. - // - // Defaults to true for all component types except plugins and components that set `dedicatedPod` to true. 
- // +optional - MountSources *bool `json:"mountSources,omitempty"` - - // Optional specification of the path in the container where - // project sources should be transferred/mounted when `mountSources` is `true`. - // When omitted, the default value of /projects is used. - // +optional - SourceMapping string `json:"sourceMapping,omitempty"` - - // Specify if a container should run in its own separated pod, - // instead of running as part of the main development environment pod. - // - // Default value is `false` - // +optional - DedicatedPod *bool `json:"dedicatedPod,omitempty"` -} - -type EndpointPluginOverride struct { - - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // +optional - // Port number to be used within the container component. The same port cannot - // be used by two different container components. - TargetPort int `json:"targetPort,omitempty"` - - // Describes how the endpoint should be exposed on the network. - // - // - `public` means that the endpoint will be exposed on the public network, typically through - // a K8S ingress or an OpenShift route. - // - // - `internal` means that the endpoint will be exposed internally outside of the main devworkspace POD, - // typically by K8S services, to be consumed by other elements running - // on the same cloud internal network. - // - // - `none` means that the endpoint will not be exposed and will only be accessible - // inside the main devworkspace POD, on a local address. - // - // Default value is `public` - // +optional - Exposure EndpointExposurePluginOverride `json:"exposure,omitempty"` - - // Describes the application and transport protocols of the traffic that will go through this endpoint. - // - // - `http`: Endpoint will have `http` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `https` when the `secure` field is set to `true`. - // - // - `https`: Endpoint will have `https` traffic, typically on a TCP connection. - // - // - `ws`: Endpoint will have `ws` traffic, typically on a TCP connection. - // It will be automaticaly promoted to `wss` when the `secure` field is set to `true`. - // - // - `wss`: Endpoint will have `wss` traffic, typically on a TCP connection. - // - // - `tcp`: Endpoint will have traffic on a TCP connection, without specifying an application protocol. - // - // - `udp`: Endpoint will have traffic on an UDP connection, without specifying an application protocol. - // - // Default value is `http` - // +optional - Protocol EndpointProtocolPluginOverride `json:"protocol,omitempty"` - - // Describes whether the endpoint should be secured and protected by some - // authentication process. This requires a protocol of `https` or `wss`. - // +optional - Secure *bool `json:"secure,omitempty"` - - // Path of the endpoint URL - // +optional - Path string `json:"path,omitempty"` - - // Map of implementation-dependant string-based free-form attributes. 
- // - // Examples of Che-specific attributes: - // - // - cookiesAuthEnabled: "true" / "false", - // - // - type: "terminal" / "ide" / "ide-dev", - // +optional - // +kubebuilder:validation:Type=object - // +kubebuilder:pruning:PreserveUnknownFields - // +kubebuilder:validation:Schemaless - Attributes attributes.Attributes `json:"attributes,omitempty"` - - // +optional - // Annotations to be added to Kubernetes Ingress or Openshift Route - Annotations map[string]string `json:"annotation,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -type K8sLikeComponentPluginOverride struct { - BaseComponentPluginOverride `json:",inline"` - K8sLikeComponentLocationPluginOverride `json:",inline"` - - // Defines if the component should be deployed during startup. - // - // Default value is `false` - // +optional - DeployByDefault *bool `json:"deployByDefault,omitempty"` - - Endpoints []EndpointPluginOverride `json:"endpoints,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumePluginOverride struct { - - // +optional - // Size of the volume - Size string `json:"size,omitempty"` - - // +optional - // Ephemeral volumes are not stored persistently across restarts. Defaults - // to false - Ephemeral *bool `json:"ephemeral,omitempty"` -} - -type ImagePluginOverride struct { - - // +optional - // Name of the image for the resulting outerloop build - ImageName string `json:"imageName,omitempty"` - ImageUnionPluginOverride `json:",inline"` -} - -type LabeledCommandPluginOverride struct { - BaseCommandPluginOverride `json:",inline"` - - // +optional - // Optional label that provides a label for this command - // to be used in Editor UI menus for example - Label string `json:"label,omitempty"` -} - -type EnvVarPluginOverride struct { - Name string `json:"name" yaml:"name"` - // +optional - Value string `json:"value,omitempty" yaml:"value"` -} - -// Annotation specifies the annotations to be added to specific resources -type AnnotationPluginOverride struct { - - // +optional - // Annotations to be added to deployment - Deployment map[string]string `json:"deployment,omitempty" patchStrategy:"merge" patchMergeKey:"name"` - - // +optional - // Annotations to be added to service - Service map[string]string `json:"service,omitempty" patchStrategy:"merge" patchMergeKey:"name"` -} - -// Volume that should be mounted to a component container -type VolumeMountPluginOverride struct { - - // The volume mount name is the name of an existing `Volume` component. - // If several containers mount the same volume name - // then they will reuse the same volume and will be able to access to the same files. - // +kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ - // +kubebuilder:validation:MaxLength=63 - Name string `json:"name"` - - // The path in the component container where the volume should be mounted. - // If not path is mentioned, default path is the is `/`. - // +optional - Path string `json:"path,omitempty"` -} - -// EndpointExposure describes the way an endpoint is exposed on the network. -// Only one of the following exposures may be specified: public, internal, none. -// +kubebuilder:validation:Enum=public;internal;none -type EndpointExposurePluginOverride string - -// EndpointProtocol defines the application and transport protocols of the traffic that will go through this endpoint. -// Only one of the following protocols may be specified: http, ws, tcp, udp. 
-// +kubebuilder:validation:Enum=http;https;ws;wss;tcp;udp -type EndpointProtocolPluginOverride string - -// +union -type K8sLikeComponentLocationPluginOverride struct { - - // +kubebuilder:validation:Enum=Uri;Inlined - // Type of Kubernetes-like location - // + - // +unionDiscriminator - // +optional - LocationType K8sLikeComponentLocationTypePluginOverride `json:"locationType,omitempty"` - - // Location in a file fetched from a uri. - // +optional - Uri string `json:"uri,omitempty"` - - // Inlined manifest - // +optional - Inlined string `json:"inlined,omitempty"` -} - -// +union -type ImageUnionPluginOverride struct { - - // +kubebuilder:validation:Enum=Dockerfile;AutoBuild - // Type of image - // - // +unionDiscriminator - // +optional - ImageType ImageTypePluginOverride `json:"imageType,omitempty"` - - // Allows specifying dockerfile type build - // +optional - Dockerfile *DockerfileImagePluginOverride `json:"dockerfile,omitempty"` - - // Defines if the image should be built during startup. - // - // Default value is `false` - // +optional - AutoBuild *bool `json:"autoBuild,omitempty"` -} - -type BaseCommandPluginOverride struct { - - // +optional - // Defines the group this command is part of - Group *CommandGroupPluginOverride `json:"group,omitempty"` -} - -// K8sLikeComponentLocationType describes the type of -// the location the configuration is fetched from. -// Only one of the following component type may be specified. -type K8sLikeComponentLocationTypePluginOverride string - -// ImageType describes the type of image. -// Only one of the following image type may be specified. -type ImageTypePluginOverride string - -// Dockerfile Image type to specify the outerloop build using a Dockerfile -type DockerfileImagePluginOverride struct { - BaseImagePluginOverride `json:",inline"` - DockerfileSrcPluginOverride `json:",inline"` - DockerfilePluginOverride `json:",inline"` -} - -type CommandGroupPluginOverride struct { - - // +optional - // Kind of group the command is part of - Kind CommandGroupKindPluginOverride `json:"kind,omitempty"` - - // +optional - // Identifies the default command for a given group kind - IsDefault *bool `json:"isDefault,omitempty"` -} - -type BaseImagePluginOverride struct { -} - -// +union -type DockerfileSrcPluginOverride struct { - - // +kubebuilder:validation:Enum=Uri;DevfileRegistry;Git - // Type of Dockerfile src - // + - // +unionDiscriminator - // +optional - SrcType DockerfileSrcTypePluginOverride `json:"srcType,omitempty"` - - // URI Reference of a Dockerfile. - // It can be a full URL or a relative URI from the current devfile as the base URI. - // +optional - Uri string `json:"uri,omitempty"` - - // Dockerfile's Devfile Registry source - // +optional - DevfileRegistry *DockerfileDevfileRegistrySourcePluginOverride `json:"devfileRegistry,omitempty"` - - // Dockerfile's Git source - // +optional - Git *DockerfileGitProjectSourcePluginOverride `json:"git,omitempty"` -} - -type DockerfilePluginOverride struct { - - // Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container - // +optional - BuildContext string `json:"buildContext,omitempty"` - - // The arguments to supply to the dockerfile build. - // +optional - Args []string `json:"args,omitempty" patchStrategy:"replace"` - - // Specify if a privileged builder pod is required. - // - // Default value is `false` - // +optional - RootRequired *bool `json:"rootRequired,omitempty"` -} - -// CommandGroupKind describes the kind of command group. 
-// +kubebuilder:validation:Enum=build;run;test;debug;deploy -type CommandGroupKindPluginOverride string - -// DockerfileSrcType describes the type of -// the src for the Dockerfile outerloop build. -// Only one of the following location type may be specified. -type DockerfileSrcTypePluginOverride string - -type DockerfileDevfileRegistrySourcePluginOverride struct { - - // +optional - // Id in a devfile registry that contains a Dockerfile. The src in the OCI registry - // required for the Dockerfile build will be downloaded for building the image. - Id string `json:"id,omitempty"` - - // Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. - // To ensure the Dockerfile gets resolved consistently in different environments, - // it is recommended to always specify the `devfileRegistryUrl` when `Id` is used. - // +optional - RegistryUrl string `json:"registryUrl,omitempty"` -} - -type DockerfileGitProjectSourcePluginOverride struct { - - // Git src for the Dockerfile build. The src required for the Dockerfile build will need to be - // cloned for building the image. - GitProjectSourcePluginOverride `json:",inline"` - - // Location of the Dockerfile in the Git repository when using git as Dockerfile src. - // Defaults to Dockerfile. - // +optional - FileLocation string `json:"fileLocation,omitempty"` -} - -type GitProjectSourcePluginOverride struct { - GitLikeProjectSourcePluginOverride `json:",inline"` -} - -type GitLikeProjectSourcePluginOverride struct { - CommonProjectSourcePluginOverride `json:",inline"` - - // Defines from what the project should be checked out. Required if there are more than one remote configured - // +optional - CheckoutFrom *CheckoutFromPluginOverride `json:"checkoutFrom,omitempty"` - - // +optional - // The remotes map which should be initialized in the git project. - // Projects must have at least one remote configured while StarterProjects & Image Component's Git source can only have at most one remote configured. - Remotes map[string]string `json:"remotes,omitempty"` -} - -type CommonProjectSourcePluginOverride struct { -} - -type CheckoutFromPluginOverride struct { - - // The revision to checkout from. Should be branch name, tag or commit id. - // Default branch is used if missing or specified revision is not found. - // +optional - Revision string `json:"revision,omitempty"` - - // The remote name should be used as init. 
Required if there are more than one remote configured - // +optional - Remote string `json:"remote,omitempty"` -} - -func (overrides PluginOverrides) isOverride() {} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go deleted file mode 100644 index 45be709ffe9..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.toplevellistcontainer_definitions.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1alpha2 - -func (container DevWorkspaceTemplateSpecContent) GetToplevelLists() TopLevelLists { - return TopLevelLists{ - "Components": extractKeys(container.Components), - "Projects": extractKeys(container.Projects), - "StarterProjects": extractKeys(container.StarterProjects), - "Commands": extractKeys(container.Commands), - } -} - -func (container ParentOverrides) GetToplevelLists() TopLevelLists { - return TopLevelLists{ - "Components": extractKeys(container.Components), - "Projects": extractKeys(container.Projects), - "StarterProjects": extractKeys(container.StarterProjects), - "Commands": extractKeys(container.Commands), - } -} - -func (container PluginOverridesParentOverride) GetToplevelLists() TopLevelLists { - return TopLevelLists{ - "Components": extractKeys(container.Components), - "Commands": extractKeys(container.Commands), - } -} - -func (container PluginOverrides) GetToplevelLists() TopLevelLists { - return TopLevelLists{ - "Components": extractKeys(container.Components), - "Commands": extractKeys(container.Commands), - } -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go deleted file mode 100644 index 417d47e12df..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2/zz_generated.union_definitions.go +++ /dev/null @@ -1,536 +0,0 @@ -package v1alpha2 - -import ( - "reflect" -) - -var commandUnion reflect.Type = reflect.TypeOf(CommandUnionVisitor{}) - -func (union CommandUnion) Visit(visitor CommandUnionVisitor) error { - return visitUnion(union, visitor) -} -func (union *CommandUnion) discriminator() *string { - return (*string)(&union.CommandType) -} -func (union *CommandUnion) Normalize() error { - return normalizeUnion(union, commandUnion) -} -func (union *CommandUnion) Simplify() { - simplifyUnion(union, commandUnion) -} - -// +k8s:deepcopy-gen=false -type CommandUnionVisitor struct { - Exec func(*ExecCommand) error - Apply func(*ApplyCommand) error - Composite func(*CompositeCommand) error - Custom func(*CustomCommand) error -} - -var imageUnion reflect.Type = reflect.TypeOf(ImageUnionVisitor{}) - -func (union ImageUnion) Visit(visitor ImageUnionVisitor) error { - return visitUnion(union, visitor) -} -func (union *ImageUnion) discriminator() *string { - return (*string)(&union.ImageType) -} -func (union *ImageUnion) Normalize() error { - return normalizeUnion(union, imageUnion) -} -func (union *ImageUnion) Simplify() { - simplifyUnion(union, imageUnion) -} - -// +k8s:deepcopy-gen=false -type ImageUnionVisitor struct { - Dockerfile func(*DockerfileImage) error - AutoBuild func(*bool) error -} - -var dockerfileSrc reflect.Type = reflect.TypeOf(DockerfileSrcVisitor{}) - -func (union DockerfileSrc) Visit(visitor DockerfileSrcVisitor) error 
{ - return visitUnion(union, visitor) -} -func (union *DockerfileSrc) discriminator() *string { - return (*string)(&union.SrcType) -} -func (union *DockerfileSrc) Normalize() error { - return normalizeUnion(union, dockerfileSrc) -} -func (union *DockerfileSrc) Simplify() { - simplifyUnion(union, dockerfileSrc) -} - -// +k8s:deepcopy-gen=false -type DockerfileSrcVisitor struct { - Uri func(string) error - DevfileRegistry func(*DockerfileDevfileRegistrySource) error - Git func(*DockerfileGitProjectSource) error -} - -var k8sLikeComponentLocation reflect.Type = reflect.TypeOf(K8sLikeComponentLocationVisitor{}) - -func (union K8sLikeComponentLocation) Visit(visitor K8sLikeComponentLocationVisitor) error { - return visitUnion(union, visitor) -} -func (union *K8sLikeComponentLocation) discriminator() *string { - return (*string)(&union.LocationType) -} -func (union *K8sLikeComponentLocation) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocation) -} -func (union *K8sLikeComponentLocation) Simplify() { - simplifyUnion(union, k8sLikeComponentLocation) -} - -// +k8s:deepcopy-gen=false -type K8sLikeComponentLocationVisitor struct { - Uri func(string) error - Inlined func(string) error -} - -var componentUnion reflect.Type = reflect.TypeOf(ComponentUnionVisitor{}) - -func (union ComponentUnion) Visit(visitor ComponentUnionVisitor) error { - return visitUnion(union, visitor) -} -func (union *ComponentUnion) discriminator() *string { - return (*string)(&union.ComponentType) -} -func (union *ComponentUnion) Normalize() error { - return normalizeUnion(union, componentUnion) -} -func (union *ComponentUnion) Simplify() { - simplifyUnion(union, componentUnion) -} - -// +k8s:deepcopy-gen=false -type ComponentUnionVisitor struct { - Container func(*ContainerComponent) error - Kubernetes func(*KubernetesComponent) error - Openshift func(*OpenshiftComponent) error - Volume func(*VolumeComponent) error - Image func(*ImageComponent) error - Plugin func(*PluginComponent) error - Custom func(*CustomComponent) error -} - -var importReferenceUnion reflect.Type = reflect.TypeOf(ImportReferenceUnionVisitor{}) - -func (union ImportReferenceUnion) Visit(visitor ImportReferenceUnionVisitor) error { - return visitUnion(union, visitor) -} -func (union *ImportReferenceUnion) discriminator() *string { - return (*string)(&union.ImportReferenceType) -} -func (union *ImportReferenceUnion) Normalize() error { - return normalizeUnion(union, importReferenceUnion) -} -func (union *ImportReferenceUnion) Simplify() { - simplifyUnion(union, importReferenceUnion) -} - -// +k8s:deepcopy-gen=false -type ImportReferenceUnionVisitor struct { - Uri func(string) error - Id func(string) error - Kubernetes func(*KubernetesCustomResourceImportReference) error -} - -var projectSource reflect.Type = reflect.TypeOf(ProjectSourceVisitor{}) - -func (union ProjectSource) Visit(visitor ProjectSourceVisitor) error { - return visitUnion(union, visitor) -} -func (union *ProjectSource) discriminator() *string { - return (*string)(&union.SourceType) -} -func (union *ProjectSource) Normalize() error { - return normalizeUnion(union, projectSource) -} -func (union *ProjectSource) Simplify() { - simplifyUnion(union, projectSource) -} - -// +k8s:deepcopy-gen=false -type ProjectSourceVisitor struct { - Git func(*GitProjectSource) error - Zip func(*ZipProjectSource) error - Custom func(*CustomProjectSource) error -} - -var componentUnionParentOverride reflect.Type = reflect.TypeOf(ComponentUnionParentOverrideVisitor{}) - -func (union 
ComponentUnionParentOverride) Visit(visitor ComponentUnionParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ComponentUnionParentOverride) discriminator() *string { - return (*string)(&union.ComponentType) -} -func (union *ComponentUnionParentOverride) Normalize() error { - return normalizeUnion(union, componentUnionParentOverride) -} -func (union *ComponentUnionParentOverride) Simplify() { - simplifyUnion(union, componentUnionParentOverride) -} - -// +k8s:deepcopy-gen=false -type ComponentUnionParentOverrideVisitor struct { - Container func(*ContainerComponentParentOverride) error - Kubernetes func(*KubernetesComponentParentOverride) error - Openshift func(*OpenshiftComponentParentOverride) error - Volume func(*VolumeComponentParentOverride) error - Image func(*ImageComponentParentOverride) error - Plugin func(*PluginComponentParentOverride) error -} - -var projectSourceParentOverride reflect.Type = reflect.TypeOf(ProjectSourceParentOverrideVisitor{}) - -func (union ProjectSourceParentOverride) Visit(visitor ProjectSourceParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ProjectSourceParentOverride) discriminator() *string { - return (*string)(&union.SourceType) -} -func (union *ProjectSourceParentOverride) Normalize() error { - return normalizeUnion(union, projectSourceParentOverride) -} -func (union *ProjectSourceParentOverride) Simplify() { - simplifyUnion(union, projectSourceParentOverride) -} - -// +k8s:deepcopy-gen=false -type ProjectSourceParentOverrideVisitor struct { - Git func(*GitProjectSourceParentOverride) error - Zip func(*ZipProjectSourceParentOverride) error -} - -var commandUnionParentOverride reflect.Type = reflect.TypeOf(CommandUnionParentOverrideVisitor{}) - -func (union CommandUnionParentOverride) Visit(visitor CommandUnionParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *CommandUnionParentOverride) discriminator() *string { - return (*string)(&union.CommandType) -} -func (union *CommandUnionParentOverride) Normalize() error { - return normalizeUnion(union, commandUnionParentOverride) -} -func (union *CommandUnionParentOverride) Simplify() { - simplifyUnion(union, commandUnionParentOverride) -} - -// +k8s:deepcopy-gen=false -type CommandUnionParentOverrideVisitor struct { - Exec func(*ExecCommandParentOverride) error - Apply func(*ApplyCommandParentOverride) error - Composite func(*CompositeCommandParentOverride) error -} - -var k8sLikeComponentLocationParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationParentOverrideVisitor{}) - -func (union K8sLikeComponentLocationParentOverride) Visit(visitor K8sLikeComponentLocationParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *K8sLikeComponentLocationParentOverride) discriminator() *string { - return (*string)(&union.LocationType) -} -func (union *K8sLikeComponentLocationParentOverride) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocationParentOverride) -} -func (union *K8sLikeComponentLocationParentOverride) Simplify() { - simplifyUnion(union, k8sLikeComponentLocationParentOverride) -} - -// +k8s:deepcopy-gen=false -type K8sLikeComponentLocationParentOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error -} - -var imageUnionParentOverride reflect.Type = reflect.TypeOf(ImageUnionParentOverrideVisitor{}) - -func (union ImageUnionParentOverride) Visit(visitor ImageUnionParentOverrideVisitor) error { - return visitUnion(union, visitor) 
-} -func (union *ImageUnionParentOverride) discriminator() *string { - return (*string)(&union.ImageType) -} -func (union *ImageUnionParentOverride) Normalize() error { - return normalizeUnion(union, imageUnionParentOverride) -} -func (union *ImageUnionParentOverride) Simplify() { - simplifyUnion(union, imageUnionParentOverride) -} - -// +k8s:deepcopy-gen=false -type ImageUnionParentOverrideVisitor struct { - Dockerfile func(*DockerfileImageParentOverride) error - AutoBuild func(*bool) error -} - -var importReferenceUnionParentOverride reflect.Type = reflect.TypeOf(ImportReferenceUnionParentOverrideVisitor{}) - -func (union ImportReferenceUnionParentOverride) Visit(visitor ImportReferenceUnionParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ImportReferenceUnionParentOverride) discriminator() *string { - return (*string)(&union.ImportReferenceType) -} -func (union *ImportReferenceUnionParentOverride) Normalize() error { - return normalizeUnion(union, importReferenceUnionParentOverride) -} -func (union *ImportReferenceUnionParentOverride) Simplify() { - simplifyUnion(union, importReferenceUnionParentOverride) -} - -// +k8s:deepcopy-gen=false -type ImportReferenceUnionParentOverrideVisitor struct { - Uri func(string) error - Id func(string) error - Kubernetes func(*KubernetesCustomResourceImportReferenceParentOverride) error -} - -var componentUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideParentOverrideVisitor{}) - -func (union ComponentUnionPluginOverrideParentOverride) Visit(visitor ComponentUnionPluginOverrideParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ComponentUnionPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.ComponentType) -} -func (union *ComponentUnionPluginOverrideParentOverride) Normalize() error { - return normalizeUnion(union, componentUnionPluginOverrideParentOverride) -} -func (union *ComponentUnionPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, componentUnionPluginOverrideParentOverride) -} - -// +k8s:deepcopy-gen=false -type ComponentUnionPluginOverrideParentOverrideVisitor struct { - Container func(*ContainerComponentPluginOverrideParentOverride) error - Kubernetes func(*KubernetesComponentPluginOverrideParentOverride) error - Openshift func(*OpenshiftComponentPluginOverrideParentOverride) error - Volume func(*VolumeComponentPluginOverrideParentOverride) error - Image func(*ImageComponentPluginOverrideParentOverride) error -} - -var commandUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideParentOverrideVisitor{}) - -func (union CommandUnionPluginOverrideParentOverride) Visit(visitor CommandUnionPluginOverrideParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *CommandUnionPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.CommandType) -} -func (union *CommandUnionPluginOverrideParentOverride) Normalize() error { - return normalizeUnion(union, commandUnionPluginOverrideParentOverride) -} -func (union *CommandUnionPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, commandUnionPluginOverrideParentOverride) -} - -// +k8s:deepcopy-gen=false -type CommandUnionPluginOverrideParentOverrideVisitor struct { - Exec func(*ExecCommandPluginOverrideParentOverride) error - Apply func(*ApplyCommandPluginOverrideParentOverride) error - Composite func(*CompositeCommandPluginOverrideParentOverride) error -} 
- -var dockerfileSrcParentOverride reflect.Type = reflect.TypeOf(DockerfileSrcParentOverrideVisitor{}) - -func (union DockerfileSrcParentOverride) Visit(visitor DockerfileSrcParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *DockerfileSrcParentOverride) discriminator() *string { - return (*string)(&union.SrcType) -} -func (union *DockerfileSrcParentOverride) Normalize() error { - return normalizeUnion(union, dockerfileSrcParentOverride) -} -func (union *DockerfileSrcParentOverride) Simplify() { - simplifyUnion(union, dockerfileSrcParentOverride) -} - -// +k8s:deepcopy-gen=false -type DockerfileSrcParentOverrideVisitor struct { - Uri func(string) error - DevfileRegistry func(*DockerfileDevfileRegistrySourceParentOverride) error - Git func(*DockerfileGitProjectSourceParentOverride) error -} - -var k8sLikeComponentLocationPluginOverrideParentOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideParentOverrideVisitor{}) - -func (union K8sLikeComponentLocationPluginOverrideParentOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *K8sLikeComponentLocationPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.LocationType) -} -func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride) -} -func (union *K8sLikeComponentLocationPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, k8sLikeComponentLocationPluginOverrideParentOverride) -} - -// +k8s:deepcopy-gen=false -type K8sLikeComponentLocationPluginOverrideParentOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error -} - -var imageUnionPluginOverrideParentOverride reflect.Type = reflect.TypeOf(ImageUnionPluginOverrideParentOverrideVisitor{}) - -func (union ImageUnionPluginOverrideParentOverride) Visit(visitor ImageUnionPluginOverrideParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ImageUnionPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.ImageType) -} -func (union *ImageUnionPluginOverrideParentOverride) Normalize() error { - return normalizeUnion(union, imageUnionPluginOverrideParentOverride) -} -func (union *ImageUnionPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, imageUnionPluginOverrideParentOverride) -} - -// +k8s:deepcopy-gen=false -type ImageUnionPluginOverrideParentOverrideVisitor struct { - Dockerfile func(*DockerfileImagePluginOverrideParentOverride) error - AutoBuild func(*bool) error -} - -var dockerfileSrcPluginOverrideParentOverride reflect.Type = reflect.TypeOf(DockerfileSrcPluginOverrideParentOverrideVisitor{}) - -func (union DockerfileSrcPluginOverrideParentOverride) Visit(visitor DockerfileSrcPluginOverrideParentOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *DockerfileSrcPluginOverrideParentOverride) discriminator() *string { - return (*string)(&union.SrcType) -} -func (union *DockerfileSrcPluginOverrideParentOverride) Normalize() error { - return normalizeUnion(union, dockerfileSrcPluginOverrideParentOverride) -} -func (union *DockerfileSrcPluginOverrideParentOverride) Simplify() { - simplifyUnion(union, dockerfileSrcPluginOverrideParentOverride) -} - -// +k8s:deepcopy-gen=false -type DockerfileSrcPluginOverrideParentOverrideVisitor struct { - Uri func(string) error - DevfileRegistry 
func(*DockerfileDevfileRegistrySourcePluginOverrideParentOverride) error - Git func(*DockerfileGitProjectSourcePluginOverrideParentOverride) error -} - -var componentUnionPluginOverride reflect.Type = reflect.TypeOf(ComponentUnionPluginOverrideVisitor{}) - -func (union ComponentUnionPluginOverride) Visit(visitor ComponentUnionPluginOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ComponentUnionPluginOverride) discriminator() *string { - return (*string)(&union.ComponentType) -} -func (union *ComponentUnionPluginOverride) Normalize() error { - return normalizeUnion(union, componentUnionPluginOverride) -} -func (union *ComponentUnionPluginOverride) Simplify() { - simplifyUnion(union, componentUnionPluginOverride) -} - -// +k8s:deepcopy-gen=false -type ComponentUnionPluginOverrideVisitor struct { - Container func(*ContainerComponentPluginOverride) error - Kubernetes func(*KubernetesComponentPluginOverride) error - Openshift func(*OpenshiftComponentPluginOverride) error - Volume func(*VolumeComponentPluginOverride) error - Image func(*ImageComponentPluginOverride) error -} - -var commandUnionPluginOverride reflect.Type = reflect.TypeOf(CommandUnionPluginOverrideVisitor{}) - -func (union CommandUnionPluginOverride) Visit(visitor CommandUnionPluginOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *CommandUnionPluginOverride) discriminator() *string { - return (*string)(&union.CommandType) -} -func (union *CommandUnionPluginOverride) Normalize() error { - return normalizeUnion(union, commandUnionPluginOverride) -} -func (union *CommandUnionPluginOverride) Simplify() { - simplifyUnion(union, commandUnionPluginOverride) -} - -// +k8s:deepcopy-gen=false -type CommandUnionPluginOverrideVisitor struct { - Exec func(*ExecCommandPluginOverride) error - Apply func(*ApplyCommandPluginOverride) error - Composite func(*CompositeCommandPluginOverride) error -} - -var k8sLikeComponentLocationPluginOverride reflect.Type = reflect.TypeOf(K8sLikeComponentLocationPluginOverrideVisitor{}) - -func (union K8sLikeComponentLocationPluginOverride) Visit(visitor K8sLikeComponentLocationPluginOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *K8sLikeComponentLocationPluginOverride) discriminator() *string { - return (*string)(&union.LocationType) -} -func (union *K8sLikeComponentLocationPluginOverride) Normalize() error { - return normalizeUnion(union, k8sLikeComponentLocationPluginOverride) -} -func (union *K8sLikeComponentLocationPluginOverride) Simplify() { - simplifyUnion(union, k8sLikeComponentLocationPluginOverride) -} - -// +k8s:deepcopy-gen=false -type K8sLikeComponentLocationPluginOverrideVisitor struct { - Uri func(string) error - Inlined func(string) error -} - -var imageUnionPluginOverride reflect.Type = reflect.TypeOf(ImageUnionPluginOverrideVisitor{}) - -func (union ImageUnionPluginOverride) Visit(visitor ImageUnionPluginOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *ImageUnionPluginOverride) discriminator() *string { - return (*string)(&union.ImageType) -} -func (union *ImageUnionPluginOverride) Normalize() error { - return normalizeUnion(union, imageUnionPluginOverride) -} -func (union *ImageUnionPluginOverride) Simplify() { - simplifyUnion(union, imageUnionPluginOverride) -} - -// +k8s:deepcopy-gen=false -type ImageUnionPluginOverrideVisitor struct { - Dockerfile func(*DockerfileImagePluginOverride) error - AutoBuild func(*bool) error -} - -var dockerfileSrcPluginOverride reflect.Type = 
reflect.TypeOf(DockerfileSrcPluginOverrideVisitor{}) - -func (union DockerfileSrcPluginOverride) Visit(visitor DockerfileSrcPluginOverrideVisitor) error { - return visitUnion(union, visitor) -} -func (union *DockerfileSrcPluginOverride) discriminator() *string { - return (*string)(&union.SrcType) -} -func (union *DockerfileSrcPluginOverride) Normalize() error { - return normalizeUnion(union, dockerfileSrcPluginOverride) -} -func (union *DockerfileSrcPluginOverride) Simplify() { - simplifyUnion(union, dockerfileSrcPluginOverride) -} - -// +k8s:deepcopy-gen=false -type DockerfileSrcPluginOverrideVisitor struct { - Uri func(string) error - DevfileRegistry func(*DockerfileDevfileRegistrySourcePluginOverride) error - Git func(*DockerfileGitProjectSourcePluginOverride) error -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go deleted file mode 100644 index 43c77c4e6ee..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/attributes.go +++ /dev/null @@ -1,453 +0,0 @@ -package attributes - -import ( - "encoding/json" - "strconv" - - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" -) - -// Attributes provides a way to add a map of arbitrary YAML/JSON -// objects. -// +kubebuilder:validation:Type=object -// +kubebuilder:validation:XPreserveUnknownFields -type Attributes map[string]apiext.JSON - -// MarshalJSON implements custom JSON marshaling -// to support free-form attributes -func (attributes Attributes) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]apiext.JSON(attributes)) -} - -// UnmarshalJSON implements custom JSON unmarshalling -// to support free-form attributes -func (attributes *Attributes) UnmarshalJSON(data []byte) error { - return json.Unmarshal(data, (*map[string]apiext.JSON)(attributes)) -} - -// Exists returns `true` if the attribute with the given key -// exists in the attributes map. -func (attributes Attributes) Exists(key string) bool { - _, exists := attributes[key] - return exists -} - -type convertPrimitiveFunc func(attributes Attributes, key string, attributeType string) (interface{}, error) - -func (attributes Attributes) getPrimitive(key string, zeroValue interface{}, resultType string, convert convertPrimitiveFunc, errorHolder *error) interface{} { - var err error - if attribute, exists := attributes[key]; exists { - var result interface{} - switch resultType { - case "string": - primitiveResult := new(string) - err = json.Unmarshal(attribute.Raw, primitiveResult) - result = *primitiveResult - case "boolean": - primitiveResult := new(bool) - err = json.Unmarshal(attribute.Raw, primitiveResult) - result = *primitiveResult - case "number": - primitiveResult := new(float64) - err = json.Unmarshal(attribute.Raw, primitiveResult) - result = *primitiveResult - } - if err == nil { - return result - } - - switch typeError := err.(type) { - case *json.UnmarshalTypeError: - convertedValue, retryError := convert(attributes, key, typeError.Value) - if retryError == nil && convertedValue != nil { - return convertedValue - } - } - } else { - err = &KeyNotFoundError{Key: key} - } - if errorHolder != nil { - *errorHolder = err - } - return zeroValue -} - -// GetString allows returning the attribute with the given key -// as a string. If the attribute JSON/YAML content is -// not a JSON string (or a primitive type that can be converted into a string), -// then the result will be the empty string and an error will be raised. 
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) GetString(key string, errorHolder *error) string {
-    return attributes.getPrimitive(
-        key,
-        "",
-        "string",
-        func(attributes Attributes, key string, attributeType string) (interface{}, error) {
-            var convertedValue interface{}
-            var retryError error
-            switch attributeType {
-            case "bool":
-                convertedValue = strconv.FormatBool(attributes.GetBoolean(key, &retryError))
-            case "number":
-                convertedValue = strconv.FormatFloat(attributes.GetNumber(key, &retryError), 'g', -1, 64)
-            }
-            return convertedValue, retryError
-        },
-        errorHolder).(string)
-}
-
-// GetNumber allows returning the attribute with the given key
-// as a float64. If the attribute JSON/YAML content is
-// not a JSON number (or a JSON string that can be converted into a JSON number),
-// then the result will be the zero value and an error is raised.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) GetNumber(key string, errorHolder *error) float64 {
-    return attributes.getPrimitive(
-        key,
-        0.0,
-        "number",
-        func(attributes Attributes, key string, attributeType string) (interface{}, error) {
-            var convertedValue interface{}
-            var retryError error
-            switch attributeType {
-            case "string":
-                var convError error
-                convertedValue, convError = strconv.ParseFloat(attributes.GetString(key, &retryError), 64)
-                if retryError == nil {
-                    retryError = convError
-                }
-            }
-            return convertedValue, retryError
-        },
-        errorHolder).(float64)
-}
-
-// GetBoolean allows returning the attribute with the given key
-// as a bool. If the attribute JSON/YAML content is
-// not a JSON boolean (or a JSON string that can be converted into a JSON boolean),
-// then the result will be the `false` zero value and an error is raised.
-//
-// String values can be converted to boolean values according to the following rules:
-//
-// - strings "1", "t", "T", "TRUE", "true", and "True" will be converted to a `true` boolean
-//
-// - strings "0", "f", "F", "FALSE", "false", "False" will be converted to a `false` boolean
-//
-// - any other string value will raise an error.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) GetBoolean(key string, errorHolder *error) bool {
-    return attributes.getPrimitive(
-        key,
-        false,
-        "boolean",
-        func(attributes Attributes, key string, attributeType string) (interface{}, error) {
-            var convertedValue interface{}
-            var retryError error
-            switch attributeType {
-            case "string":
-                var convError error
-                convertedValue, convError = strconv.ParseBool(attributes.GetString(key, &retryError))
-                if retryError == nil {
-                    retryError = convError
-                }
-            }
-            return convertedValue, retryError
-        },
-        errorHolder).(bool)
-}
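For orientation (a hedged sketch, not part of the patch): this is how the removed primitive accessors are typically used, assuming an `import "github.com/devfile/api/v2/pkg/attributes"` for the package vendored above. The error holder is optional and receives the last decoding error.

```go
// Sketch only: the builders return the map, so calls chain.
func exampleAccessors() {
	var err error
	attrs := attributes.Attributes{}.
		PutString("region", "eu-west-1").
		PutBoolean("debug", true)

	region := attrs.GetString("region", &err) // "eu-west-1"
	debug := attrs.GetBoolean("debug", &err)  // true
	_ = attrs.GetString("missing", &err)      // ""; err now holds a *KeyNotFoundError
	_, _ = region, debug
}
```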
-
-// Get allows returning the attribute with the given key
-// as an interface. The underlying type of the returned interface
-// depends on the JSON/YAML content of the attribute. It can be either a simple type
-// like a string, a float64 or a bool, or a structured type like
-// a map of interfaces or an array of interfaces.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have occurred during the attribute
-// decoding
-func (attributes Attributes) Get(key string, errorHolder *error) interface{} {
-    if attribute, exists := attributes[key]; exists {
-        container := &[]interface{}{}
-        err := json.Unmarshal([]byte("[ "+string(attribute.Raw)+" ]"), container)
-        if err != nil && errorHolder != nil {
-            *errorHolder = err
-        }
-        if len(*container) > 0 {
-            return (*container)[0]
-        }
-    } else if !exists && errorHolder != nil {
-        *errorHolder = &KeyNotFoundError{Key: key}
-    }
-    return nil
-}
-
-// GetInto allows decoding the attribute with the given key
-// into a given interface. The provided interface should be a pointer
-// to a struct, to an array, or to any simple type.
-//
-// An error is returned if the provided interface type is not compatible
-// with the attribute content
-func (attributes Attributes) GetInto(key string, into interface{}) error {
-    var err error
-    if attribute, exists := attributes[key]; exists {
-        err = json.Unmarshal(attribute.Raw, into)
-    } else {
-        err = &KeyNotFoundError{Key: key}
-    }
-    return err
-}
-
-// Strings allows returning only the attributes whose content
-// is a JSON string.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) Strings(errorHolder *error) map[string]string {
-    result := map[string]string{}
-    for key := range attributes {
-        // Here only the last error is returned.
-        // Let's keep it simple and avoid adding a dependency
-        // on an external package just for gathering errors.
-        if value, isRightType := attributes.Get(key, errorHolder).(string); isRightType {
-            result[key] = value
-        }
-    }
-    return result
-}
-
-// Numbers allows returning only the attributes whose content
-// is a JSON number.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) Numbers(errorHolder *error) map[string]float64 {
-    result := map[string]float64{}
-    for key := range attributes {
-        // Here only the last error is returned.
-        // Let's keep it simple and avoid adding a dependency
-        // on an external package just for gathering errors.
-        if value, isRightType := attributes.Get(key, errorHolder).(float64); isRightType {
-            result[key] = value
-        }
-    }
-    return result
-}
-
-// Booleans allows returning only the attributes whose content
-// is a JSON boolean.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have been raised during the attribute
-// decoding
-func (attributes Attributes) Booleans(errorHolder *error) map[string]bool {
-    result := map[string]bool{}
-    for key := range attributes {
-        // Here only the last error is returned.
-        // Let's keep it simple and avoid adding a dependency
-        // on an external package just for gathering errors.
-        if value, isRightType := attributes.Get(key, errorHolder).(bool); isRightType {
-            result[key] = value
-        }
-    }
-    return result
-}
-
-// Into allows decoding the whole attributes map
-// into a given interface. The provided interface should be either a pointer
-// to a struct, or to a map.
-//
-// An error is returned if the provided interface type is not compatible
-// with the structure of the attributes
-func (attributes Attributes) Into(into interface{}) error {
-    if attributes == nil {
-        return nil
-    }
-
-    rawJSON, err := json.Marshal(attributes)
-    if err != nil {
-        return err
-    }
-
-    err = json.Unmarshal(rawJSON, into)
-    return err
-}
-
-// AsInterface allows returning the whole attributes map
-// as an interface. When the attributes are not empty,
-// the returned interface will be a map
-// of interfaces.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have occurred during the attributes
-// decoding
-func (attributes Attributes) AsInterface(errorHolder *error) interface{} {
-    rawJSON, err := json.Marshal(attributes)
-    if err != nil && errorHolder != nil {
-        *errorHolder = err
-        return nil
-    }
-
-    container := &[]interface{}{}
-    err = json.Unmarshal([]byte("[ "+string(rawJSON)+" ]"), container)
-    if err != nil && errorHolder != nil {
-        *errorHolder = err
-        return nil
-    }
-
-    return (*container)[0]
-}
-
-// PutString allows adding a string attribute to the
-// current map of attributes
-func (attributes Attributes) PutString(key string, value string) Attributes {
-    rawJSON, _ := json.Marshal(value)
-    attributes[key] = apiext.JSON{
-        Raw: rawJSON,
-    }
-    return attributes
-}
-
-// FromStringMap allows adding into the current map of attributes all
-// the attributes contained in the given string map
-func (attributes Attributes) FromStringMap(strings map[string]string) Attributes {
-    for key, value := range strings {
-        attributes.PutString(key, value)
-    }
-    return attributes
-}
-
-// PutFloat allows adding a float attribute to the
-// current map of attributes
-func (attributes Attributes) PutFloat(key string, value float64) Attributes {
-    rawJSON, _ := json.Marshal(value)
-    attributes[key] = apiext.JSON{
-        Raw: rawJSON,
-    }
-    return attributes
-}
-
-// FromFloatMap allows adding into the current map of attributes all
-// the attributes contained in the given map of floats
-func (attributes Attributes) FromFloatMap(strings map[string]float64) Attributes {
-    for key, value := range strings {
-        attributes.PutFloat(key, value)
-    }
-    return attributes
-}
-
-// PutInteger allows adding an integer attribute to the
-// current map of attributes
-func (attributes Attributes) PutInteger(key string, value int) Attributes {
-    rawJSON, _ := json.Marshal(value)
-    attributes[key] = apiext.JSON{
-        Raw: rawJSON,
-    }
-    return attributes
-}
-
-// FromIntegerMap allows adding into the current map of attributes all
-// the attributes contained in the given map of integers
-func (attributes Attributes) FromIntegerMap(strings map[string]int) Attributes {
-    for key, value := range strings {
-        rawJSON, _ := json.Marshal(value)
-        attributes[key] = apiext.JSON{
-            Raw: rawJSON,
-        }
-    }
-    return attributes
-}
-
-// PutBoolean allows adding a boolean attribute to the
-// current map of attributes
-func (attributes Attributes) PutBoolean(key string, value bool) Attributes {
-    rawJSON, _ := json.Marshal(value)
-    attributes[key] = apiext.JSON{
-        Raw: rawJSON,
-    }
-    return attributes
-}
-
-// FromBooleanMap allows adding into the current map of attributes all
-// the attributes contained in the given map of booleans
-func (attributes Attributes) FromBooleanMap(strings map[string]bool) Attributes {
-    for key, value := range strings {
-        rawJSON, _ := json.Marshal(value)
-        attributes[key] = apiext.JSON{
-            Raw: rawJSON,
-        }
-    }
-    return attributes
-}
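Again as a hedged sketch (not part of the patch): the Put*/From* builders pair naturally with Into for round-tripping typed configuration through free-form attributes. The `deployConfig` type below is illustrative, and the same `attributes` package import is assumed.

```go
// Sketch only: deployConfig is a hypothetical target type.
type deployConfig struct {
	Replicas int  `json:"replicas"`
	Debug    bool `json:"debug"`
}

func exampleRoundTrip() (deployConfig, error) {
	attrs := attributes.Attributes{}.
		FromIntegerMap(map[string]int{"replicas": 3}).
		FromBooleanMap(map[string]bool{"debug": true})

	var cfg deployConfig
	err := attrs.Into(&cfg) // cfg.Replicas == 3, cfg.Debug == true
	return cfg, err
}
```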
-
-// Put allows adding an attribute to the
-// current map of attributes.
-// The attribute is provided as an interface, and can be any value
-// that supports JSON marshaling.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have occurred during the attributes
-// decoding
-func (attributes Attributes) Put(key string, value interface{}, errorHolder *error) Attributes {
-    rawJSON, err := json.Marshal(value)
-    if err != nil && errorHolder != nil {
-        *errorHolder = err
-    }
-
-    attributes[key] = apiext.JSON{
-        Raw: rawJSON,
-    }
-    return attributes
-}
-
-// FromMap allows adding into the current map of attributes all
-// the attributes contained in the given map of interfaces.
-// Each attribute of the given map is provided as an interface, and can be any value
-// that supports JSON marshaling.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have occurred during the attributes
-// decoding
-func (attributes Attributes) FromMap(strings map[string]interface{}, errorHolder *error) Attributes {
-    for key, value := range strings {
-        // Here only the last error is returned.
-        // Let's keep it simple and avoid adding a dependency
-        // on an external package just for gathering errors.
-        attributes.Put(key, value, errorHolder)
-    }
-    return attributes
-}
-
-// FromInterface allows completing the map of attributes from the given interface.
-// The given interface can be any value
-// that supports JSON marshaling and will be marshalled as a JSON object.
-//
-// This is especially useful to create attributes from well-known, but
-// implementation-dependent Go structures.
-//
-// An optional error holder can be passed as an argument
-// to receive any error that might have occurred during the attributes
-// decoding
-func (attributes Attributes) FromInterface(structure interface{}, errorHolder *error) Attributes {
-    newAttributes := Attributes{}
-    completeJSON, err := json.Marshal(structure)
-    if err != nil && errorHolder != nil {
-        *errorHolder = err
-    }
-
-    err = json.Unmarshal(completeJSON, &newAttributes)
-    for key, value := range newAttributes {
-        attributes[key] = value
-    }
-    return attributes
-}
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go
deleted file mode 100644
index 3c755241608..00000000000
--- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/attributes/errors.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package attributes
-
-import "fmt"
-
-// KeyNotFoundError returns an error if no key is found for the attribute
-type KeyNotFoundError struct {
-    Key string
-}
-
-func (e *KeyNotFoundError) Error() string {
-    return fmt.Sprintf("Attribute with key %q does not exist", e.Key)
-}
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/devfile/header.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/devfile/header.go
deleted file mode 100644
index 6606b068ae1..00000000000
--- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/devfile/header.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package devfile
-
-import (
-    attributes "github.com/devfile/api/v2/pkg/attributes"
-)
-
-// DevfileHeader describes the structure of the devfile-specific top-level fields
-// that are not part of the K8S API structures
-type DevfileHeader struct {
-    // Devfile schema version
-    // +kubebuilder:validation:Pattern=^([2-9])\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$
-    SchemaVersion string `json:"schemaVersion"`
-
-    // +kubebuilder:pruning:PreserveUnknownFields
-    // +optional
-    // Optional metadata
-    Metadata DevfileMetadata `json:"metadata,omitempty"`
-}
-
-// Architecture describes the architecture type
-// +kubebuilder:validation:Enum=amd64;arm64;ppc64le;s390x
-type Architecture string
-
-const (
-    AMD64 Architecture = "amd64"
-    ARM64 Architecture = "arm64"
-    PPC64LE Architecture = "ppc64le"
-    S390X Architecture = "s390x"
-)
-
-type DevfileMetadata struct {
-    // Optional devfile name
-    // +optional
-    Name string `json:"name,omitempty"`
-
-    // Optional semver-compatible version
-    // +optional
-    // +kubebuilder:validation:Pattern=^([0-9]+)\.([0-9]+)\.([0-9]+)(\-[0-9a-z-]+(\.[0-9a-z-]+)*)?(\+[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*)?$
-    Version string `json:"version,omitempty"`
-
-    // Map of implementation-dependent free-form YAML attributes. Deprecated, use the top-level attributes field instead.
-    // +optional
-    // +kubebuilder:validation:Type=object
-    // +kubebuilder:pruning:PreserveUnknownFields
-    // +kubebuilder:validation:Schemaless
-    Attributes attributes.Attributes `json:"attributes,omitempty"`
-
-    // Optional devfile display name
-    // +optional
-    DisplayName string `json:"displayName,omitempty"`
-
-    // Optional devfile description
-    // +optional
-    Description string `json:"description,omitempty"`
-
-    // Optional devfile tags
-    // +optional
-    Tags []string `json:"tags,omitempty"`
-
-    // Optional list of processor architectures that the devfile supports; an empty list suggests that the devfile can be used on any architecture
-    // +optional
-    // +kubebuilder:validation:UniqueItems=true
-    Architectures []Architecture `json:"architectures,omitempty"`
-
-    // Optional devfile icon, can be a URI or a relative path in the project
-    // +optional
-    Icon string `json:"icon,omitempty"`
-
-    // Optional devfile global memory limit
-    // +optional
-    GlobalMemoryLimit string `json:"globalMemoryLimit,omitempty"`
-
-    // Optional devfile project type
-    // +optional
-    ProjectType string `json:"projectType,omitempty"`
-
-    // Optional devfile language
-    // +optional
-    Language string `json:"language,omitempty"`
-
-    // Optional devfile website
-    // +optional
-    Website string `json:"website,omitempty"`
-
-    // Optional devfile provider information
-    // +optional
-    Provider string `json:"provider,omitempty"`
-
-    // Optional link to a page that provides support information
-    // +optional
-    SupportUrl string `json:"supportUrl,omitempty"`
-}
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go
deleted file mode 100644
index b525e8a7b61..00000000000
--- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/keys.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package overriding
-
-import (
-    "fmt"
-    "reflect"
-
-    dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-    attributesPkg "github.com/devfile/api/v2/pkg/attributes"
-    "github.com/hashicorp/go-multierror"
-    "k8s.io/apimachinery/pkg/util/sets"
-)
-
-type checkFn func(elementType string, keysSets []sets.String) []error
-
-// checkKeys provides a generic way to apply some validation on the content of each type of top-level list
-// contained in the `toplevelListContainers` passed in argument.
-//
-// For each type of top-level list, the `keysSets` argument that will be passed to the `doCheck` function
-// contains the key sets that correspond to the `toplevelListContainers` passed to this method,
-// in the same order.
-func checkKeys(doCheck checkFn, toplevelListContainers ...dw.TopLevelListContainer) error { - var errors *multierror.Error - - // intermediate storage for the conversion []map[string]KeyedList -> map[string][]sets.String - listTypeToKeys := map[string][]sets.String{} - - // Flatten []map[string]KeyedList -> map[string][]KeyedList based on map keys and convert each KeyedList - // into a sets.String - for _, topLevelListContainer := range toplevelListContainers { - topLevelList := topLevelListContainer.GetToplevelLists() - for listType, listElem := range topLevelList { - listTypeToKeys[listType] = append(listTypeToKeys[listType], sets.NewString(listElem.GetKeys()...)) - } - - value := reflect.ValueOf(topLevelListContainer) - - var variableValue reflect.Value - var attributeValue reflect.Value - - // toplevelListContainers can contain either a pointer or a struct and needs to be safeguarded when using reflect - if value.Kind() == reflect.Ptr { - variableValue = value.Elem().FieldByName("Variables") - attributeValue = value.Elem().FieldByName("Attributes") - } else { - variableValue = value.FieldByName("Variables") - attributeValue = value.FieldByName("Attributes") - } - - if variableValue.IsValid() && variableValue.Kind() == reflect.Map { - mapIter := variableValue.MapRange() - - var variableKeys []string - for mapIter.Next() { - k := mapIter.Key() - v := mapIter.Value() - if k.Kind() != reflect.String || v.Kind() != reflect.String { - return fmt.Errorf("unable to fetch top-level Variables, top-level Variables should be map of strings") - } - variableKeys = append(variableKeys, k.String()) - } - listTypeToKeys["Variables"] = append(listTypeToKeys["Variables"], sets.NewString(variableKeys...)) - } - - if attributeValue.IsValid() && attributeValue.CanInterface() { - attributes, ok := attributeValue.Interface().(attributesPkg.Attributes) - if !ok { - return fmt.Errorf("unable to fetch top-level Attributes from the devfile data") - } - var attributeKeys []string - for k := range attributes { - attributeKeys = append(attributeKeys, k) - } - listTypeToKeys["Attributes"] = append(listTypeToKeys["Attributes"], sets.NewString(attributeKeys...)) - } - } - - for listType, keySets := range listTypeToKeys { - errors = multierror.Append(errors, doCheck(listType, keySets)...) - } - return errors.ErrorOrNil() -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go deleted file mode 100644 index 279e54de91a..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/merging.go +++ /dev/null @@ -1,245 +0,0 @@ -package overriding - -import ( - "fmt" - "reflect" - "strings" - - dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/api/v2/pkg/attributes" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// MergeDevWorkspaceTemplateSpec implements the merging logic of a main devfile content with flattened, already-overridden parent devfiles or plugins. -// On a `main` `DevWorkspaceTemplateSpec` (which is the core part of a devfile, without the `apiVersion` and `metadata`), -// it allows adding all the new overridden elements provided by flattened parent and plugins -// -// Returns non-nil error if there are duplicate (== with same key) commands, components or projects between the -// main content and the parent or plugins. 
-// -// The result is a transformed `DevWorkspaceTemplateSpec` object, that does not contain any `plugin` component -// (since they are expected to be provided as flattened overridden devfiles in the arguments) -func MergeDevWorkspaceTemplateSpec( - mainContent *dw.DevWorkspaceTemplateSpecContent, - parentFlattenedContent *dw.DevWorkspaceTemplateSpecContent, - pluginFlattenedContents ...*dw.DevWorkspaceTemplateSpecContent) (*dw.DevWorkspaceTemplateSpecContent, error) { - - allContents := []*dw.DevWorkspaceTemplateSpecContent{} - if parentFlattenedContent != nil { - allContents = append(allContents, parentFlattenedContent) - } - if len(pluginFlattenedContents) > 0 { - allContents = append(allContents, pluginFlattenedContents...) - } - allContents = append(allContents, mainContent) - - // Check for conflicts - if parentFlattenedContent != nil { - if err := ensureNoConflictWithParent(mainContent, parentFlattenedContent); err != nil { - return nil, err - } - } - if len(pluginFlattenedContents) > 0 { - if err := ensureNoConflictsWithPlugins(mainContent, pluginFlattenedContents...); err != nil { - return nil, err - } - if parentFlattenedContent != nil { - // also need to ensure no conflict between parent and plugins - if err := ensureNoConflictsWithPlugins(parentFlattenedContent, pluginFlattenedContents...); err != nil { - return nil, err - } - } - } - - result := dw.DevWorkspaceTemplateSpecContent{} - - // Merge top-level lists (Commands, Projects, Components, etc ...) - - topLevelListsNames := result.GetToplevelLists() - topLevelListsByContent := []dw.TopLevelLists{} - for _, content := range allContents { - topLevelListsByContent = append(topLevelListsByContent, content.GetToplevelLists()) - } - - resultValue := reflect.ValueOf(&result).Elem() - for toplevelListName := range topLevelListsNames { - listType, fieldExists := resultValue.Type().FieldByName(toplevelListName) - if !fieldExists { - return nil, fmt.Errorf("field '%v' is unknown in %v struct (this should never happen)", toplevelListName, reflect.TypeOf(resultValue).Name()) - } - if listType.Type.Kind() != reflect.Slice { - return nil, fmt.Errorf("field '%v' in %v struct is not a slice (this should never happen)", toplevelListName, reflect.TypeOf(resultValue).Name()) - } - listElementType := listType.Type.Elem() - resultToplevelListValue := resultValue.FieldByName(toplevelListName) - for contentIndex, content := range allContents { - toplevelLists := topLevelListsByContent[contentIndex] - keyedList := toplevelLists[toplevelListName] - for _, keyed := range keyedList { - if content == mainContent { - if component, isComponent := keyed.(dw.Component); isComponent && - component.Plugin != nil { - continue - } - } - if resultToplevelListValue.IsNil() { - resultToplevelListValue.Set(reflect.MakeSlice(reflect.SliceOf(listElementType), 0, len(keyedList))) - } - resultToplevelListValue.Set(reflect.Append(resultToplevelListValue, reflect.ValueOf(keyed))) - } - } - } - - preStartCommands := sets.String{} - postStartCommands := sets.String{} - preStopCommands := sets.String{} - postStopCommands := sets.String{} - for _, content := range allContents { - if content.Events != nil { - if result.Events == nil { - result.Events = &dw.Events{} - } - preStartCommands = preStartCommands.Union(sets.NewString(content.Events.PreStart...)) - postStartCommands = postStartCommands.Union(sets.NewString(content.Events.PostStart...)) - preStopCommands = preStopCommands.Union(sets.NewString(content.Events.PreStop...)) - postStopCommands = 
postStopCommands.Union(sets.NewString(content.Events.PostStop...))
-        }
-
-        if len(content.Variables) > 0 {
-            if len(result.Variables) == 0 {
-                result.Variables = make(map[string]string)
-            }
-            for k, v := range content.Variables {
-                result.Variables[k] = v
-            }
-        }
-
-        var err error
-        if len(content.Attributes) > 0 {
-            if len(result.Attributes) == 0 {
-                result.Attributes = attributes.Attributes{}
-            }
-            for k, v := range content.Attributes {
-                result.Attributes.Put(k, v, &err)
-                if err != nil {
-                    return nil, err
-                }
-            }
-        }
-    }
-
-    if result.Events != nil {
-        result.Events.PreStart = preStartCommands.List()
-        result.Events.PostStart = postStartCommands.List()
-        result.Events.PreStop = preStopCommands.List()
-        result.Events.PostStop = postStopCommands.List()
-    }
-
-    return &result, nil
-}
-
-// MergeDevWorkspaceTemplateSpecBytes implements the merging logic of a main devfile content with flattened, already-overridden parent devfiles or plugins.
-// On a json or yaml document that contains the core content of the devfile (which is the core part of a devfile, without the `apiVersion` and `metadata`),
-// it allows adding all the new overridden elements provided by flattened parent and plugins (also provided as json or yaml documents).
-//
-// It is not allowed to have duplicate (== with same key) commands, components or projects between the main content and the parent or plugins;
-// an error is thrown in that case.
-//
-// The result is a transformed `DevfileWorkspaceTemplateSpec` object, that does not contain any `plugin` component
-// (since they are expected to be provided as flattened overridden devfiles in the arguments)
-func MergeDevWorkspaceTemplateSpecBytes(originalBytes []byte, flattenedParentBytes []byte, flattenPluginsBytes ...[]byte) (*dw.DevWorkspaceTemplateSpecContent, error) {
-    originalJson, err := yaml.ToJSON(originalBytes)
-    if err != nil {
-        return nil, err
-    }
-
-    original := dw.DevWorkspaceTemplateSpecContent{}
-    err = json.Unmarshal(originalJson, &original)
-    if err != nil {
-        return nil, err
-    }
-
-    flattenedParentJson, err := yaml.ToJSON(flattenedParentBytes)
-    if err != nil {
-        return nil, err
-    }
-
-    flattenedParent := dw.DevWorkspaceTemplateSpecContent{}
-    err = json.Unmarshal(flattenedParentJson, &flattenedParent)
-    if err != nil {
-        return nil, err
-    }
-
-    flattenedPlugins := []*dw.DevWorkspaceTemplateSpecContent{}
-    for _, flattenedPluginBytes := range flattenPluginsBytes {
-        flattenedPluginJson, err := yaml.ToJSON(flattenedPluginBytes)
-        if err != nil {
-            return nil, err
-        }
-
-        flattenedPlugin := dw.DevWorkspaceTemplateSpecContent{}
-        err = json.Unmarshal(flattenedPluginJson, &flattenedPlugin)
-        if err != nil {
-            return nil, err
-        }
-        flattenedPlugins = append(flattenedPlugins, &flattenedPlugin)
-    }
-
-    return MergeDevWorkspaceTemplateSpec(&original, &flattenedParent, flattenedPlugins...)
-}
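A hedged usage sketch (not part of the patch), assuming the `overriding` package vendored above and the `dw` alias for the v1alpha2 types; the YAML fragments and image name are illustrative:

```go
// Sketch only: a main devfile body plus an already-flattened parent.
func exampleMerge() (*dw.DevWorkspaceTemplateSpecContent, error) {
	mainYAML := []byte(`
components:
  - name: tools
    container:
      image: quay.io/example/tools:latest
`)
	parentYAML := []byte(`
commands:
  - id: build
    exec:
      component: tools
      commandLine: go build ./...
`)
	// Fails instead of silently overriding if main and parent define
	// the same command, component or project key.
	return overriding.MergeDevWorkspaceTemplateSpecBytes(mainYAML, parentYAML)
}
```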
"+ - "If you want to override them, you should do it in the parent scope.", - elementType, - strings.Join(overriddenElementsInMainContent.List(), ", "))} - } - return []error{} - }, - mainContent, parentflattenedContent) -} - -func ensureNoConflictsWithPlugins(mainContent *dw.DevWorkspaceTemplateSpecContent, pluginFlattenedContents ...*dw.DevWorkspaceTemplateSpecContent) error { - getPluginKey := func(pluginIndex int) string { - index := 0 - for _, comp := range mainContent.Components { - if comp.Plugin != nil { - if pluginIndex == index { - return comp.Name - } - index++ - } - } - return "unknown" - } - - allSpecs := []dw.TopLevelListContainer{mainContent} - for _, pluginFlattenedContent := range pluginFlattenedContents { - allSpecs = append(allSpecs, pluginFlattenedContent) - } - return checkKeys(func(elementType string, keysSets []sets.String) []error { - mainKeys := keysSets[0] - pluginKeysSets := keysSets[1:] - errs := []error{} - for pluginNumber, pluginKeys := range pluginKeysSets { - overriddenElementsInMainContent := mainKeys.Intersection(pluginKeys) - - if overriddenElementsInMainContent.Len() > 0 { - errs = append(errs, fmt.Errorf("Some %s are already defined in plugin '%s': %s. "+ - "If you want to override them, you should do it in the plugin scope.", - elementType, - getPluginKey(pluginNumber), - strings.Join(overriddenElementsInMainContent.List(), ", "))) - } - } - return errs - }, - allSpecs...) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go deleted file mode 100644 index 1a6f1d25cd2..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/overriding.go +++ /dev/null @@ -1,189 +0,0 @@ -package overriding - -import ( - "fmt" - "reflect" - "strings" - - dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - unions "github.com/devfile/api/v2/pkg/utils/unions" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/sets" - strategicpatch "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/apimachinery/pkg/util/yaml" -) - -// OverrideDevWorkspaceTemplateSpecBytes implements the overriding logic for parent devfiles or plugins. -// On a json or yaml document that contains the core content of the devfile (without the `apiVersion` and `metadata`), -// it allows applying a `patch` which is a document fragment of the same schema. -// -// The Overriding logic is implemented according to strategic merge patch rules, as defined here: -// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#background -// -// The result is a transformed `DevfileWorkspaceTemplateSpec` object that can be serialized back to yaml or json. -func OverrideDevWorkspaceTemplateSpecBytes(originalBytes []byte, patchBytes []byte) (*dw.DevWorkspaceTemplateSpecContent, error) { - originalJson, err := yaml.ToJSON(originalBytes) - if err != nil { - return nil, err - } - - original := dw.DevWorkspaceTemplateSpecContent{} - err = json.Unmarshal(originalJson, &original) - if err != nil { - return nil, err - } - - patchJson, err := yaml.ToJSON(patchBytes) - if err != nil { - return nil, err - } - - patch := dw.ParentOverrides{} - err = json.Unmarshal(patchJson, &patch) - if err != nil { - return nil, err - } - - return OverrideDevWorkspaceTemplateSpec(&original, &patch) - -} - -// OverrideDevWorkspaceTemplateSpec implements the overriding logic for parent devfiles or plugins. 
-// On an `original` `DevfileWorkspaceTemplateSpec` (which is the core part of a devfile, without the `apiVersion` and `metadata`), -// it allows applying a `patch` which is a `ParentOverrides` or a `PluginOverrides` object. -// -// The Overriding logic is implemented according to strategic merge patch rules, as defined here: -// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md#background -// -// The result is a transformed `DevfileWorkspaceTemplateSpec` object. -func OverrideDevWorkspaceTemplateSpec(original *dw.DevWorkspaceTemplateSpecContent, patch dw.Overrides) (*dw.DevWorkspaceTemplateSpecContent, error) { - if err := ensureOnlyExistingElementsAreOverridden(original, patch); err != nil { - return nil, err - } - - if err := unions.Normalize(&original); err != nil { - return nil, err - } - if err := unions.Normalize(&patch); err != nil { - return nil, err - } - - normalizedOriginalBytes, err := json.Marshal(original) - if err != nil { - return nil, err - } - - originalMap, err := handleUnmarshal(normalizedOriginalBytes) - if err != nil { - return nil, err - } - - normalizedPatchBytes, err := json.Marshal(patch) - if err != nil { - return nil, err - } - patchMap, err := handleUnmarshal(normalizedPatchBytes) - if err != nil { - return nil, err - } - - schema, err := strategicpatch.NewPatchMetaFromStruct(original) - if err != nil { - return nil, err - } - - patchedMap, err := strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, mapEnabledPatchMetaFromStruct{schema}) - if err != nil { - return nil, err - } - - patchedBytes, err := json.Marshal(patchedMap) - if err != nil { - return nil, err - } - - patched := dw.DevWorkspaceTemplateSpecContent{} - err = json.Unmarshal(patchedBytes, &patched) - if err != nil { - return nil, err - } - - if err = unions.Simplify(&patched); err != nil { - return nil, err - } - return &patched, nil -} - -func ensureOnlyExistingElementsAreOverridden(spec *dw.DevWorkspaceTemplateSpecContent, overrides dw.Overrides) error { - return checkKeys(func(elementType string, keysSets []sets.String) []error { - if len(keysSets) <= 1 { - return []error{} - } - specKeys := keysSets[0] - overlayKeys := keysSets[1] - newElementsInOverlay := overlayKeys.Difference(specKeys) - if newElementsInOverlay.Len() > 0 { - return []error{fmt.Errorf("Some %s do not override any existing element: %s. 
"+ - "They should be defined in the main body, as new elements, not in the overriding section", - elementType, - strings.Join(newElementsInOverlay.List(), ", "))} - } - return []error{} - }, - spec, overrides) -} - -type mapEnabledPatchMetaFromStruct struct { - delegate strategicpatch.PatchMetaFromStruct -} - -var _ strategicpatch.LookupPatchMeta = (*mapEnabledPatchMetaFromStruct)(nil) - -func (s *mapEnabledPatchMetaFromStruct) replaceMapWithSingleKeyStruct(key string, elementIsSlice bool) { - t := s.delegate.T - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() == reflect.Map { - typeName := t.Name() - tag := `json:"` + key + `"` - elemType := t.Elem() - if typeName == "Attributes" { - if elementIsSlice { - elemType = reflect.SliceOf(reflect.StructOf([]reflect.StructField{})) - } else { - tag = tag + ` patchStrategy:"replace"` - elemType = reflect.StructOf([]reflect.StructField{}) - } - } - - s.delegate.T = reflect.StructOf([]reflect.StructField{{Name: strings.Title(key), Type: elemType, Tag: reflect.StructTag(tag)}}) - } -} - -func (s mapEnabledPatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (strategicpatch.LookupPatchMeta, strategicpatch.PatchMeta, error) { - s.replaceMapWithSingleKeyStruct(key, false) - - schema, patchMeta, err := s.delegate.LookupPatchMetadataForStruct(key) - var lookupPatchMeta strategicpatch.LookupPatchMeta = nil - if schema != nil { - lookupPatchMeta = mapEnabledPatchMetaFromStruct{delegate: schema.(strategicpatch.PatchMetaFromStruct)} - } - return lookupPatchMeta, patchMeta, err -} - -func (s mapEnabledPatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (strategicpatch.LookupPatchMeta, strategicpatch.PatchMeta, error) { - s.replaceMapWithSingleKeyStruct(key, true) - - schema, patchMeta, err := s.delegate.LookupPatchMetadataForSlice(key) - var lookupPatchMeta strategicpatch.LookupPatchMeta = nil - if schema != nil { - lookupPatchMeta = mapEnabledPatchMetaFromStruct{schema.(strategicpatch.PatchMetaFromStruct)} - } - return lookupPatchMeta, patchMeta, err -} - -func (s mapEnabledPatchMetaFromStruct) Name() string { - return s.delegate.Name() -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/utils.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/utils.go deleted file mode 100644 index 4e7d489a95b..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/overriding/utils.go +++ /dev/null @@ -1,18 +0,0 @@ -package overriding - -import ( - "k8s.io/apimachinery/pkg/util/json" -) - -func handleUnmarshal(j []byte) (map[string]interface{}, error) { - if j == nil { - j = []byte("{}") - } - - m := map[string]interface{}{} - err := json.Unmarshal(j, &m) - if err != nil { - return nil, err - } - return m, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go deleted file mode 100644 index 96d6e486eae..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/utils/unions/normalize.go +++ /dev/null @@ -1,64 +0,0 @@ -package unions - -import ( - "reflect" - - dw "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/mitchellh/reflectwalk" -) - -type normalizer struct { -} - -func (n *normalizer) Struct(s reflect.Value) error { - if s.CanAddr() { - addr := s.Addr() - if addr.CanInterface() { - i := addr.Interface() - if u, ok := i.(dw.Union); ok { - u.Normalize() - } - } - } - return nil -} -func (n *normalizer) StructField(reflect.StructField, reflect.Value) 
error { - return nil -} - -type simplifier struct { -} - -func (n *simplifier) Struct(s reflect.Value) error { - if s.CanAddr() { - addr := s.Addr() - if addr.CanInterface() { - i := addr.Interface() - if u, ok := i.(dw.Union); ok { - u.Simplify() - } - } - } - return nil -} -func (n *simplifier) StructField(reflect.StructField, reflect.Value) error { - return nil -} - -// Normalize allows normalizing all the unions -// encountered while walking through the whole struct tree. -// Union normalizing works according to the following rules: -// - When only one field of the union is set and no discriminator is set, set the discriminator according to the union value. -// - When several fields are set and a discriminator is set, remove (== reset to zero value) all the values that do not match the discriminator. -// - When only one union value is set and it matches discriminator, just do nothing. -// - In other case, something is inconsistent or ambiguous: an error is thrown. -func Normalize(tree interface{}) error { - return reflectwalk.Walk(tree, &normalizer{}) -} - -// Simplify allows removing the discriminator of all unions -// encountered while walking through the whole struct tree, -// but after normalizing them if necessary. -func Simplify(tree interface{}) error { - return reflectwalk.Walk(tree, &simplifier{}) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/commands.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/commands.go deleted file mode 100644 index 1ebf6d3161f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/commands.go +++ /dev/null @@ -1,187 +0,0 @@ -package validation - -import ( - "fmt" - "strings" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/hashicorp/go-multierror" -) - -// ValidateCommands validates the devfile commands and checks: -// 1. there are no duplicate command ids -// 2. the command type is not invalid -// 3. 
if a command is part of a command group, there is a single default command -func ValidateCommands(commands []v1alpha2.Command, components []v1alpha2.Component) (returnedErr error) { - groupKindCommandMap := make(map[v1alpha2.CommandGroupKind][]v1alpha2.Command) - commandMap := getCommandsMap(commands) - - err := v1alpha2.CheckDuplicateKeys(commands) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - for _, command := range commands { - // parentCommands is a map to keep a track of all the parent commands when validating the composite command's subcommands recursively - parentCommands := make(map[string]string) - err := validateCommand(command, parentCommands, commandMap, components) - if err != nil { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, command.Attributes)) - } - - commandGroup := getGroup(command) - if commandGroup != nil { - groupKindCommandMap[commandGroup.Kind] = append(groupKindCommandMap[commandGroup.Kind], command) - } - } - - for groupKind, commands := range groupKindCommandMap { - if err := validateGroup(commands, groupKind); err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - } - - return returnedErr -} - -// validateCommand validates a given devfile command where parentCommands is a map to track all the parent commands when validating -// the composite command's subcommands recursively and devfileCommands is a map of command id to the devfile command -func validateCommand(command v1alpha2.Command, parentCommands map[string]string, devfileCommands map[string]v1alpha2.Command, components []v1alpha2.Component) error { - - switch { - case command.Composite != nil: - return validateCompositeCommand(&command, parentCommands, devfileCommands, components) - case command.Exec != nil || command.Apply != nil: - return validateCommandComponent(command, components) - default: - return &InvalidCommandTypeError{commandId: command.Id} - } - -} - -// validateGroup validates commands belonging to a specific group kind. If there are multiple commands belonging to the same group: -// 1. without any default, err out -// 2. 
with more than one default, err out -func validateGroup(commands []v1alpha2.Command, groupKind v1alpha2.CommandGroupKind) error { - defaultCommandCount := 0 - var defaultCommands []v1alpha2.Command - if len(commands) > 1 { - for _, command := range commands { - defaultVal := getGroup(command).IsDefault - if defaultVal != nil && *defaultVal { - defaultCommandCount++ - defaultCommands = append(defaultCommands, command) - } - } - } else { - return nil - } - - if defaultCommandCount == 0 { - return &MissingDefaultCmdWarning{groupKind: groupKind} - } else if defaultCommandCount > 1 { - var commandsReferenceList []string - for _, command := range defaultCommands { - commandsReferenceList = append(commandsReferenceList, - resolveErrorMessageWithImportAttributes(fmt.Errorf("command: %s", command.Id), command.Attributes).Error()) - } - commandsReference := strings.Join(commandsReferenceList, "; ") - // example: there should be exactly one default command, currently there are multiple commands; - // command: ; command: , imported from uri: http://127.0.0.1:8080, in parent overrides from main devfile" - return &MultipleDefaultCmdError{ - groupKind: groupKind, - commandsReference: commandsReference, - } - } - - return nil -} - -// getGroup returns the group the command belongs to, or nil if the command does not belong to a group -func getGroup(command v1alpha2.Command) *v1alpha2.CommandGroup { - switch { - case command.Composite != nil: - return command.Composite.Group - case command.Exec != nil: - return command.Exec.Group - case command.Apply != nil: - return command.Apply.Group - case command.Custom != nil: - return command.Custom.Group - - default: - return nil - } -} - -// validateCommandComponent validates the given exec or apply command, the command should map to a valid container component -func validateCommandComponent(command v1alpha2.Command, components []v1alpha2.Component) error { - - if command.Exec == nil && command.Apply == nil { - return &InvalidCommandError{commandId: command.Id, reason: "should be of type exec or apply"} - } - - var commandComponent string - if command.Exec != nil { - commandComponent = command.Exec.Component - } else if command.Apply != nil { - commandComponent = command.Apply.Component - } - - // exec command must map to a container component - // apply command must map to a container/kubernetes/openshift/image component - for _, component := range components { - if commandComponent == component.Name { - if component.Container != nil { - return nil - } - if command.Apply != nil && (component.Image != nil || component.Kubernetes != nil || component.Openshift != nil) { - return nil - } - break - } - } - return &InvalidCommandError{commandId: command.Id, reason: "command does not map to a valid component"} -} - -// validateCompositeCommand checks that the specified composite command is valid. The command: -// 1. should not reference itself via a subcommand -// 2. should not indirectly reference itself via a subcommand which is a composite command -// 3. should reference a valid devfile command -// 4. 
should have a valid exec sub command
-// where parentCommands is a map to track all the parent commands when validating the composite command's subcommands recursively
-// and devfileCommands is a map of command id to the devfile command
-func validateCompositeCommand(command *v1alpha2.Command, parentCommands map[string]string, devfileCommands map[string]v1alpha2.Command, components []v1alpha2.Component) error {
-
-    // Store the command ID in a map of parent commands
-    parentCommands[command.Id] = command.Id
-
-    if command.Composite == nil {
-        return &InvalidCommandError{commandId: command.Id, reason: "should be of type composite"}
-    }
-
-    // Loop over the commands and validate that each command points to a command that's in the devfile
-    for _, cmd := range command.Composite.Commands {
-        if strings.ToLower(cmd) == command.Id {
-            return &InvalidCommandError{commandId: command.Id, reason: "composite command cannot reference itself"}
-        }
-
-        // Don't allow commands to indirectly reference themselves, so check if the command equals any of the parent commands in the command tree
-        _, ok := parentCommands[strings.ToLower(cmd)]
-        if ok {
-            return &InvalidCommandError{commandId: command.Id, reason: "composite command cannot indirectly reference itself"}
-        }
-
-        subCommand, ok := devfileCommands[strings.ToLower(cmd)]
-        if !ok {
-            return &InvalidCommandError{commandId: command.Id, reason: fmt.Sprintf("the command %q mentioned in the composite command does not exist in the devfile", cmd)}
-        }
-
-        err := validateCommand(subCommand, parentCommands, devfileCommands, components)
-        if err != nil {
-            return err
-        }
-        delete(parentCommands, cmd)
-    }
-    return nil
-}
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/components.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/components.go
deleted file mode 100644
index e72d3325597..00000000000
--- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/components.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package validation
-
-import (
-    "fmt"
-    "strings"
-
-    "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-    "github.com/hashicorp/go-multierror"
-    "k8s.io/apimachinery/pkg/api/resource"
-)
-
-const (
-    // EnvProjectsSrc is the env defined for path to the project source in a component container
-    EnvProjectsSrc = "PROJECT_SOURCE"
-
-    // EnvProjectsRoot is the env defined for project mount in a component container when component's mountSources=true
-    EnvProjectsRoot = "PROJECTS_ROOT"
-)
-
-// ValidateComponents validates the components:
-// 1. makes sure the container components reference a valid volume component if they use volume mounts
-// 2. makes sure the volume components are unique
-// 3. checks the URI specified in openshift components and kubernetes components is in a valid format
-// 4. makes sure the component name is unique
-// 5. 
makes sure the image dockerfile component git src has at most one remote -func ValidateComponents(components []v1alpha2.Component) (returnedErr error) { - - processedVolumes := make(map[string]bool) - processedVolumeMounts := make(map[string][]string) - processedEndPointName := make(map[string]bool) - processedEndPointPort := make(map[int]bool) - processedComponentWithVolumeMounts := make(map[string]v1alpha2.Component) - processedDeploymentAnnotations := make(map[string]string) - processedServiceAnnotations := make(map[string]string) - deploymentAnnotationDuplication := make(map[string]bool) - serviceAnnotationDuplication := make(map[string]bool) - - err := v1alpha2.CheckDuplicateKeys(components) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - for _, component := range components { - switch { - case component.Container != nil: - // Process all the volume mounts in container components to validate them later - for _, volumeMount := range component.Container.VolumeMounts { - processedVolumeMounts[component.Name] = append(processedVolumeMounts[component.Name], volumeMount.Name) - processedComponentWithVolumeMounts[component.Name] = component - - } - - // Check if any containers are customizing the reserved PROJECT_SOURCE or PROJECTS_ROOT env - for _, env := range component.Container.Env { - if env.Name == EnvProjectsSrc { - reservedEnvErr := &ReservedEnvError{envName: EnvProjectsSrc, componentName: component.Name} - returnedErr = multierror.Append(returnedErr, reservedEnvErr) - } else if env.Name == EnvProjectsRoot { - reservedEnvErr := &ReservedEnvError{envName: EnvProjectsRoot, componentName: component.Name} - returnedErr = multierror.Append(returnedErr, reservedEnvErr) - } - } - var memoryLimit, cpuLimit, memoryRequest, cpuRequest resource.Quantity - if component.Container.MemoryLimit != "" { - memoryLimit, err = resource.ParseQuantity(component.Container.MemoryLimit) - if err != nil { - parseQuantityErr := &ParsingResourceRequirementError{resource: MemoryLimit, cmpName: component.Name, errMsg: err.Error()} - returnedErr = multierror.Append(returnedErr, parseQuantityErr) - } - } - if component.Container.CpuLimit != "" { - cpuLimit, err = resource.ParseQuantity(component.Container.CpuLimit) - if err != nil { - parseQuantityErr := &ParsingResourceRequirementError{resource: CpuLimit, cmpName: component.Name, errMsg: err.Error()} - returnedErr = multierror.Append(returnedErr, parseQuantityErr) - } - } - if component.Container.MemoryRequest != "" { - memoryRequest, err = resource.ParseQuantity(component.Container.MemoryRequest) - if err != nil { - parseQuantityErr := &ParsingResourceRequirementError{resource: MemoryRequest, cmpName: component.Name, errMsg: err.Error()} - returnedErr = multierror.Append(returnedErr, parseQuantityErr) - } else if !memoryLimit.IsZero() && memoryRequest.Cmp(memoryLimit) > 0 { - invalidResourceRequest := &InvalidResourceRequestError{cmpName: component.Name, errMsg: fmt.Sprintf("memoryRequest is greater than memoryLimit.")} - returnedErr = multierror.Append(returnedErr, invalidResourceRequest) - } - } - if component.Container.CpuRequest != "" { - cpuRequest, err = resource.ParseQuantity(component.Container.CpuRequest) - if err != nil { - parseQuantityErr := &ParsingResourceRequirementError{resource: CpuRequest, cmpName: component.Name, errMsg: err.Error()} - returnedErr = multierror.Append(returnedErr, parseQuantityErr) - } else if !cpuLimit.IsZero() && cpuRequest.Cmp(cpuLimit) > 0 { - invalidResourceRequest := 
&InvalidResourceRequestError{cmpName: component.Name, errMsg: fmt.Sprintf("cpuRequest is greater than cpuLimit.")} - returnedErr = multierror.Append(returnedErr, invalidResourceRequest) - } - } - - // if annotation is not empty and dedicatedPod is false - if component.Container.Annotation != nil && component.Container.DedicatedPod != nil && !(*component.Container.DedicatedPod) { - for key, value := range component.Container.Annotation.Deployment { - if processedVal, exist := processedDeploymentAnnotations[key]; exist && processedVal != value { - // only append the error for a single key once - if _, exist := deploymentAnnotationDuplication[key]; !exist { - annotationConflictErr := &AnnotationConflictError{annotationName: key, annotationType: DeploymentAnnotation} - returnedErr = multierror.Append(returnedErr, annotationConflictErr) - deploymentAnnotationDuplication[key] = true - } - } else { - processedDeploymentAnnotations[key] = value - } - } - - for key, value := range component.Container.Annotation.Service { - if processedVal, exist := processedServiceAnnotations[key]; exist && processedVal != value { - // only append the error for a single key once - if _, exist := serviceAnnotationDuplication[key]; !exist { - annotationConflictErr := &AnnotationConflictError{annotationName: key, annotationType: ServiceAnnotation} - returnedErr = multierror.Append(returnedErr, annotationConflictErr) - serviceAnnotationDuplication[key] = true - } - } else { - processedServiceAnnotations[key] = value - } - } - } - - err := validateEndpoints(component.Container.Endpoints, processedEndPointPort, processedEndPointName) - if len(err) > 0 { - for _, endpointErr := range err { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, component.Attributes)) - } - } - case component.Volume != nil: - processedVolumes[component.Name] = true - if len(component.Volume.Size) > 0 { - // We use the Kube API for validation because there are so many ways to - // express storage in Kubernetes. For reference, you may check doc - // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - if _, err := resource.ParseQuantity(component.Volume.Size); err != nil { - invalidVolErr := &InvalidVolumeError{name: component.Name, reason: fmt.Sprintf("size %s for volume component is invalid, %v. 
Example - 2Gi, 1024Mi", component.Volume.Size, err)} - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(invalidVolErr, component.Attributes)) - } - } - case component.Openshift != nil: - if component.Openshift.Uri != "" { - err := ValidateURI(component.Openshift.Uri) - if err != nil { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) - } - } - - err := validateEndpoints(component.Openshift.Endpoints, processedEndPointPort, processedEndPointName) - if len(err) > 0 { - for _, endpointErr := range err { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, component.Attributes)) - } - } - case component.Kubernetes != nil: - if component.Kubernetes.Uri != "" { - err := ValidateURI(component.Kubernetes.Uri) - if err != nil { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) - } - } - err := validateEndpoints(component.Kubernetes.Endpoints, processedEndPointPort, processedEndPointName) - if len(err) > 0 { - for _, endpointErr := range err { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(endpointErr, component.Attributes)) - } - } - case component.Image != nil: - var gitSource v1alpha2.GitLikeProjectSource - if component.Image.Dockerfile != nil && component.Image.Dockerfile.Git != nil { - gitSource = component.Image.Dockerfile.Git.GitLikeProjectSource - if err := validateSingleRemoteGitSrc("component", component.Name, gitSource); err != nil { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) - } - } - case component.Plugin != nil: - if component.Plugin.RegistryUrl != "" { - err := ValidateURI(component.Plugin.RegistryUrl) - if err != nil { - returnedErr = multierror.Append(returnedErr, resolveErrorMessageWithImportAttributes(err, component.Attributes)) - } - } - } - - } - - // Check if the volume mounts mentioned in the containers are referenced by a volume component - var invalidVolumeMountsErrList []string - for componentName, volumeMountNames := range processedVolumeMounts { - for _, volumeMountName := range volumeMountNames { - if !processedVolumes[volumeMountName] { - missingVolumeMountErr := fmt.Errorf("volume mount %s belonging to the container component %s", volumeMountName, componentName) - newErr := resolveErrorMessageWithImportAttributes(missingVolumeMountErr, processedComponentWithVolumeMounts[componentName].Attributes) - invalidVolumeMountsErrList = append(invalidVolumeMountsErrList, newErr.Error()) - } - } - } - - if len(invalidVolumeMountsErrList) > 0 { - invalidVolumeMountsErr := fmt.Sprintf("\n%s", strings.Join(invalidVolumeMountsErrList, "\n")) - returnedErr = multierror.Append(returnedErr, &MissingVolumeMountError{errMsg: invalidVolumeMountsErr}) - } - - return returnedErr -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go deleted file mode 100644 index 698b6ecd50f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/endpoints.go +++ /dev/null @@ -1,29 +0,0 @@ -package validation - -import "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - -// validateEndpoints checks if -// 1. all the endpoint names are unique across components -// 2. 
endpoint port are unique across component containers -// ie; two component containers cannot have the same target port but two endpoints -// in a single component container can have the same target port -func validateEndpoints(endpoints []v1alpha2.Endpoint, processedEndPointPort map[int]bool, processedEndPointName map[string]bool) (errList []error) { - currentComponentEndPointPort := make(map[int]bool) - - for _, endPoint := range endpoints { - if _, ok := processedEndPointName[endPoint.Name]; ok { - errList = append(errList, &InvalidEndpointError{name: endPoint.Name}) - } - processedEndPointName[endPoint.Name] = true - currentComponentEndPointPort[endPoint.TargetPort] = true - } - - for targetPort := range currentComponentEndPointPort { - if _, ok := processedEndPointPort[targetPort]; ok { - errList = append(errList, &InvalidEndpointError{port: targetPort}) - } - processedEndPointPort[targetPort] = true - } - - return errList -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/errors.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/errors.go deleted file mode 100644 index fd582e14a68..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/errors.go +++ /dev/null @@ -1,241 +0,0 @@ -package validation - -import ( - "fmt" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - attributesAPI "github.com/devfile/api/v2/pkg/attributes" -) - -// InvalidEventError returns an error if the devfile event type has invalid events -type InvalidEventError struct { - eventType string - errorMsg string -} - -func (e *InvalidEventError) Error() string { - return fmt.Sprintf("%s type events are invalid: %s", e.eventType, e.errorMsg) -} - -// InvalidCommandError returns an error if the command is invalid -type InvalidCommandError struct { - commandId string - reason string -} - -func (e *InvalidCommandError) Error() string { - return fmt.Sprintf("the command %q is invalid - %s", e.commandId, e.reason) -} - -// InvalidCommandError returns an error if the command is invalid -type InvalidCommandTypeError struct { - commandId string -} - -func (e *InvalidCommandTypeError) Error() string { - return fmt.Sprintf("command %s has invalid type", e.commandId) -} - -// MultipleDefaultCmdError returns an error if there are multiple default commands for a single group kind -type MultipleDefaultCmdError struct { - groupKind v1alpha2.CommandGroupKind - commandsReference string -} - -func (e *MultipleDefaultCmdError) Error() string { - return fmt.Sprintf("command group %s error - there should be exactly one default command, currently there are multiple default commands; %s", - e.groupKind, e.commandsReference) -} - -// MissingDefaultCmdWarning returns an error if there is no default command for a single group kind -type MissingDefaultCmdWarning struct { - groupKind v1alpha2.CommandGroupKind -} - -func (e *MissingDefaultCmdWarning) Error() string { - return fmt.Sprintf("command group %s warning - there should be exactly one default command, currently there is no default command", e.groupKind) -} - -// ReservedEnvError returns an error if the user attempts to customize a reserved ENV in a container -type ReservedEnvError struct { - componentName string - envName string -} - -func (e *ReservedEnvError) Error() string { - return fmt.Sprintf("env variable %s is reserved and cannot be customized in component %s", e.envName, e.componentName) -} - -// InvalidVolumeError returns an error if the volume is invalid -type InvalidVolumeError struct { - name string - reason string 
-} - -func (e *InvalidVolumeError) Error() string { - return fmt.Sprintf("the volume %q is invalid - %s", e.name, e.reason) -} - -// MissingVolumeMountError returns an error if the container volume mount does not reference a valid volume component -type MissingVolumeMountError struct { - errMsg string -} - -func (e *MissingVolumeMountError) Error() string { - return fmt.Sprintf("unable to find the following volume mounts in devfile volume components: %s", e.errMsg) -} - -// InvalidEndpointError returns an error if the component endpoint is invalid -type InvalidEndpointError struct { - name string - port int -} - -func (e *InvalidEndpointError) Error() string { - var errMsg string - if e.name != "" { - errMsg = fmt.Sprintf("devfile contains multiple endpoint entries with same name: %v", e.name) - } else if fmt.Sprint(e.port) != "" { - errMsg = fmt.Sprintf("devfile contains multiple containers with same endpoint targetPort: %v", e.port) - } - - return errMsg -} - -// InvalidComponentError returns an error if the component is invalid -type InvalidComponentError struct { - componentName string - reason string -} - -func (e *InvalidComponentError) Error() string { - return fmt.Sprintf("the component %q is invalid - %s", e.componentName, e.reason) -} - -//MissingProjectRemoteError returns an error if the git remotes object under a project is empty -type MissingProjectRemoteError struct { - projectName string -} - -func (e *MissingProjectRemoteError) Error() string { - return fmt.Sprintf("project %s should have at least one remote", e.projectName) -} - -//MissingRemoteError returns an error if the git remotes object is empty -type MissingRemoteError struct { - objectType string - objectName string -} - -func (e *MissingRemoteError) Error() string { - return fmt.Sprintf("%s %s should have at least one remote", e.objectType, e.objectName) -} - -//MultipleRemoteError returns an error if multiple git remotes are specified. There can only be one remote. 
-type MultipleRemoteError struct { - objectType string - objectName string -} - -func (e *MultipleRemoteError) Error() string { - return fmt.Sprintf("%s %s should have one remote only", e.objectType, e.objectName) -} - -//MissingProjectCheckoutFromRemoteError returns an error if there are multiple git remotes but the checkoutFrom remote has not been specified -type MissingProjectCheckoutFromRemoteError struct { - projectName string -} - -func (e *MissingProjectCheckoutFromRemoteError) Error() string { - return fmt.Sprintf("project %s has more than one remote defined, but has no checkoutfrom remote defined", e.projectName) -} - -//InvalidProjectCheckoutRemoteError returns an error if there is an unmatched checkoutFrom remote specified -type InvalidProjectCheckoutRemoteError struct { - objectType string - objectName string - checkoutRemote string -} - -func (e *InvalidProjectCheckoutRemoteError) Error() string { - return fmt.Sprintf("unable to find the checkout remote %s in the remotes for %s %s", e.checkoutRemote, e.objectType, e.objectName) -} - -type ResourceRequirementType string - -const ( - MemoryLimit ResourceRequirementType = "memoryLimit" - CpuLimit ResourceRequirementType = "cpuLimit" - MemoryRequest ResourceRequirementType = "memoryRequest" - CpuRequest ResourceRequirementType = "cpuRequest" -) - -//ParsingResourceRequirementError returns an error if failed to parse a resource requirement -type ParsingResourceRequirementError struct { - resource ResourceRequirementType - cmpName string - errMsg string -} - -func (e *ParsingResourceRequirementError) Error() string { - return fmt.Sprintf("error parsing %s requirement for component %s: %s", e.resource, e.cmpName, e.errMsg) -} - -//InvalidResourceRequestError returns an error if resource limit < resource requested -type InvalidResourceRequestError struct { - cmpName string - errMsg string -} - -func (e *InvalidResourceRequestError) Error() string { - return fmt.Sprintf("invalid resource request for component %s: %s", e.cmpName, e.errMsg) -} - -type AnnotationType string - -const ( - DeploymentAnnotation AnnotationType = "deployment" - ServiceAnnotation AnnotationType = "service" -) - -//AnnotationConflictError returns an error if an annotation has been declared with conflict values -type AnnotationConflictError struct { - annotationName string - annotationType AnnotationType -} - -func (e *AnnotationConflictError) Error() string { - return fmt.Sprintf("%v annotation: %v has been declared multiple times and with different values", e.annotationType, e.annotationName) -} - -// resolveErrorMessageWithImportAttributes returns an updated error message -// with detailed information on the imported and overridden resource.
-// example: -// "the component is invalid - , imported from Uri: http://example.com/devfile.yaml, in parent overrides from main devfile" -func resolveErrorMessageWithImportAttributes(validationErr error, attributes attributesAPI.Attributes) error { - var findKeyErr error - importReference := attributes.Get(ImportSourceAttribute, &findKeyErr) - - // overridden element must contain import resource information - // an overridden element can be either parentOverride or pluginOverride - // example: - // if an element is imported from another devfile, but contains no overrides - ImportSourceAttribute - // if an element is from parentOverride - ImportSourceAttribute + ParentOverrideAttribute - // if an element is from pluginOverride - ImportSourceAttribute + PluginOverrideAttribute - if findKeyErr == nil { - validationErr = fmt.Errorf("%s, imported from %s", validationErr.Error(), importReference) - parentOverrideReference := attributes.Get(ParentOverrideAttribute, &findKeyErr) - if findKeyErr == nil { - validationErr = fmt.Errorf("%s, in parent overrides from %s", validationErr.Error(), parentOverrideReference) - } else { - // reset findKeyErr to nil - findKeyErr = nil - pluginOverrideReference := attributes.Get(PluginOverrideAttribute, &findKeyErr) - if findKeyErr == nil { - validationErr = fmt.Errorf("%s, in plugin overrides from %s", validationErr.Error(), pluginOverrideReference) - } - } - } - - return validationErr -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/events.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/events.go deleted file mode 100644 index e31e474ba72..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/events.go +++ /dev/null @@ -1,125 +0,0 @@ -package validation - -import ( - "fmt" - "github.com/hashicorp/go-multierror" - "strings" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -const ( - preStart = "preStart" - postStart = "postStart" - preStop = "preStop" - postStop = "postStop" -) - -// ValidateEvents validates all the devfile events -func ValidateEvents(events v1alpha2.Events, commands []v1alpha2.Command) (err error) { - - commandMap := getCommandsMap(commands) - - switch { - case len(events.PreStart) > 0: - if preStartErr := isEventValid(events.PreStart, preStart, commandMap); preStartErr != nil { - err = multierror.Append(err, preStartErr) - } - fallthrough - case len(events.PostStart) > 0: - if postStartErr := isEventValid(events.PostStart, postStart, commandMap); postStartErr != nil { - err = multierror.Append(err, postStartErr) - } - fallthrough - case len(events.PreStop) > 0: - if preStopErr := isEventValid(events.PreStop, preStop, commandMap); preStopErr != nil { - err = multierror.Append(err, preStopErr) - } - fallthrough - case len(events.PostStop) > 0: - if postStopErr := isEventValid(events.PostStop, postStop, commandMap); postStopErr != nil { - err = multierror.Append(err, postStopErr) - } - } - - return err -} - -// isEventValid checks if events belonging to a specific event type are valid ie; -// 1. event should map to a valid devfile command -// 2. preStart and postStop events should either map to an apply command or a composite command with apply commands -// 3. 
postStart and preStop events should either map to an exec command or a composite command with exec commands -func isEventValid(eventNames []string, eventType string, commandMap map[string]v1alpha2.Command) error { - var invalidCommand, invalidApplyEvents, invalidExecEvents []string - - for _, eventName := range eventNames { - command, ok := commandMap[strings.ToLower(eventName)] - if !ok { // check if event is in the list of devfile commands - invalidCommand = append(invalidCommand, eventName) - continue - } - - switch eventType { - case preStart, postStop: - // check if the event is either an apply command or a composite of apply commands - if command.Apply == nil && command.Composite == nil { - invalidApplyEvents = append(invalidApplyEvents, eventName) - } else if command.Composite != nil { - invalidApplyEvents = append(invalidApplyEvents, validateCompositeEvents(*command.Composite, eventName, eventType, commandMap)...) - } - case postStart, preStop: - // check if the event is either an exec command or a composite of exec commands - if command.Exec == nil && command.Composite == nil { - invalidExecEvents = append(invalidExecEvents, eventName) - } else if command.Composite != nil { - invalidExecEvents = append(invalidExecEvents, validateCompositeEvents(*command.Composite, eventName, eventType, commandMap)...) - } - } - } - - var err error - var eventErrorsList []string - - if len(invalidCommand) > 0 { - eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s does not map to a valid devfile command", strings.Join(invalidCommand, ", "))) - } - - if len(invalidApplyEvents) > 0 { - eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s should either map to an apply command or a composite command with apply commands", strings.Join(invalidApplyEvents, ", "))) - } - - if len(invalidExecEvents) > 0 { - eventErrorsList = append(eventErrorsList, fmt.Sprintf("%s should either map to an exec command or a composite command with exec commands", strings.Join(invalidExecEvents, ", "))) - } - - if len(eventErrorsList) != 0 { - eventErrors := fmt.Sprintf("\n%s", strings.Join(eventErrorsList, "\n")) - err = &InvalidEventError{eventType: eventType, errorMsg: eventErrors} - } - - return err -} - -// validateCompositeEvents checks if a composite command's subcommands are -// 1. apply commands for preStart and postStop -// 2. exec commands for postStart and preStop -func validateCompositeEvents(composite v1alpha2.CompositeCommand, eventName, eventType string, commandMap map[string]v1alpha2.Command) []string { - var invalidEvents []string - - switch eventType { - case preStart, postStop: - for _, subCommand := range composite.Commands { - if command, ok := commandMap[subCommand]; ok && command.Apply == nil { - invalidEvents = append(invalidEvents, eventName) - } - } - case postStart, preStop: - for _, subCommand := range composite.Commands { - if command, ok := commandMap[subCommand]; ok && command.Exec == nil { - invalidEvents = append(invalidEvents, eventName) - } - } - } - - return invalidEvents -}
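To make the event-kind rule above concrete: a minimal sketch, independent of the vendored types (the simplified Command struct and the "build" id are hypothetical; the real validator also recurses into composite commands as shown in validateCompositeEvents above).

package main

import "fmt"

// Command is a simplified stand-in: at most one of Apply or Exec is set.
type Command struct {
	Id    string
	Apply bool
	Exec  bool
}

// checkEvent enforces the rule: preStart/postStop events must map to apply
// commands, postStart/preStop events must map to exec commands.
func checkEvent(eventType string, cmd Command) error {
	switch eventType {
	case "preStart", "postStop":
		if !cmd.Apply {
			return fmt.Errorf("%s event %q must map to an apply command", eventType, cmd.Id)
		}
	case "postStart", "preStop":
		if !cmd.Exec {
			return fmt.Errorf("%s event %q must map to an exec command", eventType, cmd.Id)
		}
	}
	return nil
}

func main() {
	// Rejected: an exec command is bound to a preStart event.
	fmt.Println(checkEvent("preStart", Command{Id: "build", Exec: true}))
}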
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/projects.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/projects.go deleted file mode 100644 index 4cbc875c0df..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/projects.go +++ /dev/null @@ -1,94 +0,0 @@ -package validation - -import ( - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/hashicorp/go-multierror" -) - -// ValidateStarterProjects checks if starter project has only one remote configured -// and if the checkout remote matches the remote configured -func ValidateStarterProjects(starterProjects []v1alpha2.StarterProject) (returnedErr error) { - - for _, starterProject := range starterProjects { - var gitSource v1alpha2.GitLikeProjectSource - if starterProject.Git != nil { - gitSource = starterProject.Git.GitLikeProjectSource - } else { - continue - } - - if starterProjectErr := validateSingleRemoteGitSrc("starterProject", starterProject.Name, gitSource); starterProjectErr != nil { - newErr := resolveErrorMessageWithImportAttributes(starterProjectErr, starterProject.Attributes) - returnedErr = multierror.Append(returnedErr, newErr) - } - } - - return returnedErr -} - -// ValidateProjects checks that if the project has more than one remote configured then a checkout -// remote is mandatory, and that the checkout remote matches the remote configured -func ValidateProjects(projects []v1alpha2.Project) (returnedErr error) { - - for _, project := range projects { - var gitSource v1alpha2.GitLikeProjectSource - if project.Git != nil { - gitSource = project.Git.GitLikeProjectSource - } else { - continue - } - switch len(gitSource.Remotes) { - case 0: - - newErr := resolveErrorMessageWithImportAttributes(&MissingProjectRemoteError{projectName: project.Name}, project.Attributes) - returnedErr = multierror.Append(returnedErr, newErr) - case 1: - if gitSource.CheckoutFrom != nil && gitSource.CheckoutFrom.Remote != "" { - if err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, "project", project.Name); err != nil { - newErr := resolveErrorMessageWithImportAttributes(err, project.Attributes) - returnedErr = multierror.Append(returnedErr, newErr) - } - } - default: // len(gitSource.Remotes) >= 2 - if gitSource.CheckoutFrom == nil || gitSource.CheckoutFrom.Remote == "" { - - newErr := resolveErrorMessageWithImportAttributes(&MissingProjectCheckoutFromRemoteError{projectName: project.Name}, project.Attributes) - returnedErr = multierror.Append(returnedErr, newErr) - continue - } - if err := validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, "project", project.Name); err != nil { - newErr := resolveErrorMessageWithImportAttributes(err, project.Attributes) - returnedErr = multierror.Append(returnedErr, newErr) - } - } - } - - return returnedErr -} - -// validateRemoteMap checks if the checkout remote is present in the project remote map -func validateRemoteMap(remotes map[string]string, checkoutRemote, objectType, objectName string) error { - - if _, ok := remotes[checkoutRemote]; !ok { - - return &InvalidProjectCheckoutRemoteError{objectName: objectName, objectType: objectType, checkoutRemote: checkoutRemote} - } - - return nil -} - -// validateSingleRemoteGitSrc validates a git src for a single remote only -func validateSingleRemoteGitSrc(objectType, objectName string, gitSource v1alpha2.GitLikeProjectSource) (err error) { - switch len(gitSource.Remotes) { - case 0: - err = &MissingRemoteError{objectType: objectType, objectName: objectName} - case 1: - if gitSource.CheckoutFrom != nil && gitSource.CheckoutFrom.Remote != "" { - err = validateRemoteMap(gitSource.Remotes, gitSource.CheckoutFrom.Remote, objectType, objectName) - } - default: // len(gitSource.Remotes) >= 2 - err = &MultipleRemoteError{objectType: objectType, objectName: objectName} - } - - return err -}
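As a usage illustration of the remote rules enforced above: a minimal sketch of the same checks outside the vendored package (the remotes map and remote names here are invented).

package main

import "fmt"

// checkRemotes mirrors the rule above: zero remotes is an error; with one
// remote, checkoutRemote is only checked if given; with more than one
// remote, checkoutRemote is mandatory and must exist in the map.
func checkRemotes(remotes map[string]string, checkoutRemote string) error {
	switch {
	case len(remotes) == 0:
		return fmt.Errorf("at least one remote is required")
	case len(remotes) > 1 && checkoutRemote == "":
		return fmt.Errorf("checkoutFrom.remote is mandatory when more than one remote is configured")
	}
	if checkoutRemote != "" {
		if _, ok := remotes[checkoutRemote]; !ok {
			return fmt.Errorf("checkout remote %q not found in remotes", checkoutRemote)
		}
	}
	return nil
}

func main() {
	remotes := map[string]string{
		"origin":   "https://example.com/repo.git",
		"upstream": "https://example.com/upstream.git",
	}
	fmt.Println(checkRemotes(remotes, ""))         // error: checkout remote mandatory
	fmt.Println(checkRemotes(remotes, "upstream")) // <nil>
}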
diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/utils.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/utils.go deleted file mode 100644 index 6f0ec12df36..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/utils.go +++ /dev/null @@ -1,44 +0,0 @@ -package validation - -import ( - "net/url" - "strings" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// attribute keys for imported and overridden elements -// the value of those keys is the resource information -const ( - // attribute key of the imported element resource information - ImportSourceAttribute = "api.devfile.io/imported-from" - // attribute key of the parent overridden element resource information - ParentOverrideAttribute = "api.devfile.io/parent-override-from" - // attribute key of the plugin overridden element resource information - PluginOverrideAttribute = "api.devfile.io/plugin-override-from" -) - -// getCommandsMap iterates through the commands and returns a map of command -func getCommandsMap(commands []v1alpha2.Command) map[string]v1alpha2.Command { - commandMap := make(map[string]v1alpha2.Command, len(commands)) - - for _, command := range commands { - command.Id = strings.ToLower(command.Id) - commandMap[command.Id] = command - } - - return commandMap -} - -// ValidateURI checks if the string is with valid uri format, return error if not valid -func ValidateURI(uri string) error { - if strings.HasPrefix(uri, "http") { - if _, err := url.ParseRequestURI(uri); err != nil { - return err - } - } else if _, err := url.Parse(uri); err != nil { - return err - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md deleted file mode 100644 index c8ff0cccb96..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/validation-rule.md +++ /dev/null @@ -1,68 +0,0 @@ -### Id and Name: -`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$` - -The restriction is added to allow easy translation to K8s resource names, and also to have consistent rules for both name and id fields. - -The validation will be done as part of schema validation; the rule will be introduced as a regex in the schema definition, and any violation of the rule in a devfile will result in a failure. - -- limit to lowercase characters i.e., no uppercase allowed -- limit within 63 characters -- no special characters allowed except dash (-) -- start with an alphanumeric character -- end with an alphanumeric character
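A quick way to see this naming rule in action: a minimal sketch checking the regex quoted above against a few sample ids (the sample ids are invented; the 63-character limit is applied separately, as with Kubernetes resource names).

package main

import (
	"fmt"
	"regexp"
)

// namePattern is the id/name rule quoted above.
var namePattern = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

func validName(name string) bool {
	return len(name) <= 63 && namePattern.MatchString(name)
}

func main() {
	for _, id := range []string{"build-image", "Build", "my--cmd", "-run"} {
		fmt.Printf("%-12s %v\n", id, validName(id)) // uppercase and leading dash are rejected
	}
}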
- - -### Endpoints: -- all the endpoint names are unique across components -- endpoint ports must be unique across components -- two components cannot have the same target port, but one component may have two endpoints with the same target port. This restriction does not apply to container components with `dedicatedPod` set to `true`. - - -### Commands: -1. id must be unique -2. composite command: - - Should not reference itself via a subcommand - - Should not indirectly reference itself via a subcommand which is a composite command - - Should reference a valid devfile command -3. exec command should map to a valid container component -4. apply command should map to a valid container/kubernetes/openshift/image component -5. `{build, run, test, debug, deploy}`, each kind of group can only have one default command associated with it. If there are multiple commands of the same kind without a default, a warning will be displayed. - -### Components: -Common rules for all component types: -- Name must be unique - -#### Container component -1. the container components must reference a valid volume component if it uses volume mounts, and the volume components are unique -2. `PROJECT_SOURCE` or `PROJECTS_ROOT` are reserved environment variables defined under env, cannot be defined again in `env` -3. the annotations should not have conflicting values for the same key, except deployment annotations and service annotations set for a container with `dedicatedPod=true` -4. resource requirements, e.g. `cpuLimit`, `cpuRequest`, `memoryLimit`, `memoryRequest`, must be in valid quantity format; and the resource requested must be less than the resource limit (if specified). - -#### Plugin Component -- Commands in plugin components share the same commands validation rules as listed above. Validation occurs after overriding and merging, in flattened devfile -- Registry URL needs to be in valid format - -#### Kubernetes & Openshift component -- URI needs to be in valid URI format - -#### Image component -- A Dockerfile Image component's git source cannot have more than one remote defined. If checkout remote is mentioned, validate it against the remote configured map - - -### Events: -1. preStart and postStop events can only be Apply commands -2. postStart and preStop events can only be Exec commands -3. if preStart and postStop events refer to a composite command, then all containing commands need to be Apply commands. -4. if postStart and preStop events refer to a composite command, then all containing commands need to be Exec commands. - - -### Parent: -- Share the same validation rules as listed above.
Validation occurs after overriding and merging, in flattened devfile - - -### starterProjects: -- Starter project entries cannot have more than one remote defined -- if checkout remote is mentioned, validate it against the starter project remote configured map - -### projects -- if more than one remote is configured, a checkout remote is mandatory -- if checkout remote is mentioned, validate it against the starter project remote configured map diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go deleted file mode 100644 index 572eefaac49..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -package variables - -import ( - "fmt" - "sort" - "strings" -) - -// InvalidKeysError returns an error for the invalid keys -type InvalidKeysError struct { - Keys []string -} - -func (e *InvalidKeysError) Error() string { - return fmt.Sprintf("invalid variable references - %s", strings.Join(e.Keys, ",")) -} - -// newInvalidKeysError processes the invalid key set and returns an InvalidKeysError if present -func newInvalidKeysError(keySet map[string]bool) error { - var invalidKeysArr []string - for key := range keySet { - invalidKeysArr = append(invalidKeysArr, key) - } - - if len(invalidKeysArr) > 0 { - sort.Strings(invalidKeysArr) - return &InvalidKeysError{Keys: invalidKeysArr} - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go deleted file mode 100644 index be047ee2589..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/utils.go +++ /dev/null @@ -1,10 +0,0 @@ -package variables - -// checkForInvalidError checks for InvalidKeysError and stores the key in the map -func checkForInvalidError(invalidKeys map[string]bool, err error) { - if verr, ok := err.(*InvalidKeysError); ok { - for _, key := range verr.Keys { - invalidKeys[key] = true - } - } -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go deleted file mode 100644 index 0abba8f8169..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables.go +++ /dev/null @@ -1,69 +0,0 @@ -package variables - -import ( - "regexp" - "strings" - - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// example of the regex: {{variable}} / {{ variable }} -var globalVariableRegex = regexp.MustCompile(`\{\{\s*(.*?)\s*\}\}`) - -// VariableWarning stores the invalid variable references for each devfile object -type VariableWarning struct { - // Commands stores a map of command ids to the invalid variable references - Commands map[string][]string - - // Components stores a map of component names to the invalid variable references - Components map[string][]string - - // Projects stores a map of project names to the invalid variable references - Projects map[string][]string - - // StarterProjects stores a map of starter project names to the invalid variable references - StarterProjects map[string][]string -} - -// ValidateAndReplaceGlobalVariable validates the workspace template spec data for global variable references and replaces them with the variable value -func ValidateAndReplaceGlobalVariable(workspaceTemplateSpec *v1alpha2.DevWorkspaceTemplateSpec) 
VariableWarning { - - var variableWarning VariableWarning - - if workspaceTemplateSpec != nil { - // Validate the components and replace for global variable - variableWarning.Components = ValidateAndReplaceForComponents(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Components) - - // Validate the commands and replace for global variable - variableWarning.Commands = ValidateAndReplaceForCommands(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Commands) - - // Validate the projects and replace for global variable - variableWarning.Projects = ValidateAndReplaceForProjects(workspaceTemplateSpec.Variables, workspaceTemplateSpec.Projects) - - // Validate the starter projects and replace for global variable - variableWarning.StarterProjects = ValidateAndReplaceForStarterProjects(workspaceTemplateSpec.Variables, workspaceTemplateSpec.StarterProjects) - } - - return variableWarning -} - -// validateAndReplaceDataWithVariable validates the string for a global variable and replaces it. An error -// is returned if the string references an invalid global variable key -func validateAndReplaceDataWithVariable(val string, variables map[string]string) (string, error) { - matches := globalVariableRegex.FindAllStringSubmatch(val, -1) - var invalidKeys []string - for _, match := range matches { - varValue, ok := variables[match[1]] - if !ok { - invalidKeys = append(invalidKeys, match[1]) - } else { - val = strings.Replace(val, match[0], varValue, -1) - } - } - - if len(invalidKeys) > 0 { - return val, &InvalidKeysError{Keys: invalidKeys} - } - - return val, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go deleted file mode 100644 index eb51b230a14..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_command.go +++ /dev/null @@ -1,111 +0,0 @@ -package variables - -import ( - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// ValidateAndReplaceForCommands validates the commands data for global variable references and replaces them with the variable value. -// Returns a map of command ids and invalid variable references if present. 
-func ValidateAndReplaceForCommands(variables map[string]string, commands []v1alpha2.Command) map[string][]string { - - commandsWarningMap := make(map[string][]string) - - for i := range commands { - var err error - - // Validate various command types - switch { - case commands[i].Exec != nil: - if err = validateAndReplaceForExecCommand(variables, commands[i].Exec); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - commandsWarningMap[commands[i].Id] = verr.Keys - } - } - case commands[i].Composite != nil: - if err = validateAndReplaceForCompositeCommand(variables, commands[i].Composite); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - commandsWarningMap[commands[i].Id] = verr.Keys - } - } - case commands[i].Apply != nil: - if err = validateAndReplaceForApplyCommand(variables, commands[i].Apply); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - commandsWarningMap[commands[i].Id] = verr.Keys - } - } - } - } - - return commandsWarningMap -} - -// validateAndReplaceForExecCommand validates the exec command data for global variable references and replaces them with the variable value -func validateAndReplaceForExecCommand(variables map[string]string, exec *v1alpha2.ExecCommand) error { - - if exec == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate exec command line - if exec.CommandLine, err = validateAndReplaceDataWithVariable(exec.CommandLine, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate exec working dir - if exec.WorkingDir, err = validateAndReplaceDataWithVariable(exec.WorkingDir, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate exec label - if exec.Label, err = validateAndReplaceDataWithVariable(exec.Label, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate exec env - if len(exec.Env) > 0 { - if err = validateAndReplaceForEnv(variables, exec.Env); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForCompositeCommand validates the composite command data for global variable references and replaces them with the variable value -func validateAndReplaceForCompositeCommand(variables map[string]string, composite *v1alpha2.CompositeCommand) error { - - if composite == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate composite label - if composite.Label, err = validateAndReplaceDataWithVariable(composite.Label, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForApplyCommand validates the apply command data for global variable references and replaces them with the variable value -func validateAndReplaceForApplyCommand(variables map[string]string, apply *v1alpha2.ApplyCommand) error { - - if apply == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate apply label - if apply.Label, err = validateAndReplaceDataWithVariable(apply.Label, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - return newInvalidKeysError(invalidKeys) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go deleted file mode 100644 index a42aa2f171d..00000000000 --- 
a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_component.go +++ /dev/null @@ -1,293 +0,0 @@ -package variables - -import ( - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// ValidateAndReplaceForComponents validates the components data for global variable references and replaces them with the variable value -// Returns a map of component names and invalid variable references if present. -func ValidateAndReplaceForComponents(variables map[string]string, components []v1alpha2.Component) map[string][]string { - - componentsWarningMap := make(map[string][]string) - - for i := range components { - var err error - - // Validate various component types - switch { - case components[i].Container != nil: - if err = validateAndReplaceForContainerComponent(variables, components[i].Container); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - componentsWarningMap[components[i].Name] = verr.Keys - } - } - case components[i].Kubernetes != nil: - if err = validateAndReplaceForKubernetesComponent(variables, components[i].Kubernetes); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - componentsWarningMap[components[i].Name] = verr.Keys - } - } - case components[i].Openshift != nil: - if err = validateAndReplaceForOpenShiftComponent(variables, components[i].Openshift); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - componentsWarningMap[components[i].Name] = verr.Keys - } - } - case components[i].Image != nil: - if err = validateAndReplaceForImageComponent(variables, components[i].Image); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - componentsWarningMap[components[i].Name] = verr.Keys - } - } - case components[i].Volume != nil: - if err = validateAndReplaceForVolumeComponent(variables, components[i].Volume); err != nil { - if verr, ok := err.(*InvalidKeysError); ok { - componentsWarningMap[components[i].Name] = verr.Keys - } - } - } - } - - return componentsWarningMap -} - -// validateAndReplaceForContainerComponent validates the container component data for global variable references and replaces them with the variable value -func validateAndReplaceForContainerComponent(variables map[string]string, container *v1alpha2.ContainerComponent) error { - - if container == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate container image - if container.Image, err = validateAndReplaceDataWithVariable(container.Image, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate container commands - for i := range container.Command { - if container.Command[i], err = validateAndReplaceDataWithVariable(container.Command[i], variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // Validate container args - for i := range container.Args { - if container.Args[i], err = validateAndReplaceDataWithVariable(container.Args[i], variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // Validate memory limit - if container.MemoryLimit, err = validateAndReplaceDataWithVariable(container.MemoryLimit, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate memory request - if container.MemoryRequest, err = validateAndReplaceDataWithVariable(container.MemoryRequest, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate source mapping - if container.SourceMapping, err = validateAndReplaceDataWithVariable(container.SourceMapping, variables); err != nil { - 
checkForInvalidError(invalidKeys, err) - } - - // Validate container env - if len(container.Env) > 0 { - if err = validateAndReplaceForEnv(variables, container.Env); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // Validate container volume mounts - for i := range container.VolumeMounts { - if container.VolumeMounts[i].Path, err = validateAndReplaceDataWithVariable(container.VolumeMounts[i].Path, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // Validate container endpoints - if len(container.Endpoints) > 0 { - if err = validateAndReplaceForEndpoint(variables, container.Endpoints); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForEnv validates the env data for global variable references and replaces them with the variable value -func validateAndReplaceForEnv(variables map[string]string, env []v1alpha2.EnvVar) error { - - invalidKeys := make(map[string]bool) - - for i := range env { - var err error - - // Validate env name - if env[i].Name, err = validateAndReplaceDataWithVariable(env[i].Name, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate env value - if env[i].Value, err = validateAndReplaceDataWithVariable(env[i].Value, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForKubernetesComponent validates the kubernetes component data for global variable references and replaces them with the variable value -func validateAndReplaceForKubernetesComponent(variables map[string]string, kubernetes *v1alpha2.KubernetesComponent) error { - - if kubernetes == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate kubernetes uri - if kubernetes.Uri, err = validateAndReplaceDataWithVariable(kubernetes.Uri, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate kubernetes inlined - if kubernetes.Inlined, err = validateAndReplaceDataWithVariable(kubernetes.Inlined, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate kubernetes endpoints - if len(kubernetes.Endpoints) > 0 { - if err = validateAndReplaceForEndpoint(variables, kubernetes.Endpoints); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForOpenShiftComponent validates the openshift component data for global variable references and replaces them with the variable value -func validateAndReplaceForOpenShiftComponent(variables map[string]string, openshift *v1alpha2.OpenshiftComponent) error { - - if openshift == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate openshift uri - if openshift.Uri, err = validateAndReplaceDataWithVariable(openshift.Uri, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate openshift inlined - if openshift.Inlined, err = validateAndReplaceDataWithVariable(openshift.Inlined, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate openshift endpoints - if len(openshift.Endpoints) > 0 { - if err = validateAndReplaceForEndpoint(variables, openshift.Endpoints); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForImageComponent validates the image component data for global variable 
references and replaces them with the variable value -func validateAndReplaceForImageComponent(variables map[string]string, image *v1alpha2.ImageComponent) error { - - if image == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate image's image name - if image.ImageName, err = validateAndReplaceDataWithVariable(image.ImageName, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - if err = validateAndReplaceForDockerfileImageComponent(variables, image.Dockerfile); err != nil { - checkForInvalidError(invalidKeys, err) - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForDockerfileImageComponent validates the dockerfile image component data for global variable references and replaces them with the variable value -func validateAndReplaceForDockerfileImageComponent(variables map[string]string, dockerfileImage *v1alpha2.DockerfileImage) error { - - if dockerfileImage == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - switch { - case dockerfileImage.Uri != "": - // Validate dockerfile image URI - if dockerfileImage.Uri, err = validateAndReplaceDataWithVariable(dockerfileImage.Uri, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - case dockerfileImage.Git != nil: - // Validate dockerfile Git location - if dockerfileImage.Git.FileLocation, err = validateAndReplaceDataWithVariable(dockerfileImage.Git.FileLocation, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - gitProject := &dockerfileImage.Git.GitLikeProjectSource - if err = validateAndReplaceForGitProjectSource(variables, gitProject); err != nil { - checkForInvalidError(invalidKeys, err) - } - case dockerfileImage.DevfileRegistry != nil: - // Validate dockerfile devfile registry src - if dockerfileImage.DevfileRegistry.Id, err = validateAndReplaceDataWithVariable(dockerfileImage.DevfileRegistry.Id, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - if dockerfileImage.DevfileRegistry.RegistryUrl, err = validateAndReplaceDataWithVariable(dockerfileImage.DevfileRegistry.RegistryUrl, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // Validate dockerfile image's build context - if dockerfileImage.BuildContext, err = validateAndReplaceDataWithVariable(dockerfileImage.BuildContext, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate dockerfile image's args - for i := range dockerfileImage.Args { - if dockerfileImage.Args[i], err = validateAndReplaceDataWithVariable(dockerfileImage.Args[i], variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForVolumeComponent validates the volume component data for global variable references and replaces them with the variable value -func validateAndReplaceForVolumeComponent(variables map[string]string, volume *v1alpha2.VolumeComponent) error { - - if volume == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - // Validate volume size - if volume.Size, err = validateAndReplaceDataWithVariable(volume.Size, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - return newInvalidKeysError(invalidKeys) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go deleted file mode 100644 index 
17a1e2ed0bd..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_endpoint.go +++ /dev/null @@ -1,22 +0,0 @@ -package variables - -import ( - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// validateAndReplaceForEndpoint validates the endpoint data for global variable references and replaces them with the variable value -func validateAndReplaceForEndpoint(variables map[string]string, endpoints []v1alpha2.Endpoint) error { - - invalidKeys := make(map[string]bool) - - for i := range endpoints { - var err error - - // Validate endpoint path - if endpoints[i].Path, err = validateAndReplaceDataWithVariable(endpoints[i].Path, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} diff --git a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go b/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go deleted file mode 100644 index 15afe461696..00000000000 --- a/ui/wasm/vendor/github.com/devfile/api/v2/pkg/validation/variables/variables_project.go +++ /dev/null @@ -1,138 +0,0 @@ -package variables - -import ( - "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// ValidateAndReplaceForProjects validates the projects data for global variable references and replaces them with the variable value. -// Returns a map of project names and invalid variable references if present. -func ValidateAndReplaceForProjects(variables map[string]string, projects []v1alpha2.Project) map[string][]string { - - projectsWarningMap := make(map[string][]string) - - for i := range projects { - var err error - - invalidKeys := make(map[string]bool) - - // Validate project clonepath - if projects[i].ClonePath, err = validateAndReplaceDataWithVariable(projects[i].ClonePath, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate project source - if err = validateandReplaceForProjectSource(variables, &projects[i].ProjectSource); err != nil { - checkForInvalidError(invalidKeys, err) - } - - err = newInvalidKeysError(invalidKeys) - if verr, ok := err.(*InvalidKeysError); ok { - projectsWarningMap[projects[i].Name] = verr.Keys - } - } - - return projectsWarningMap -} - -// ValidateAndReplaceForStarterProjects validates the starter projects data for global variable references and replaces them with the variable value. -// Returns a map of starter project names and invalid variable references if present. 
-func ValidateAndReplaceForStarterProjects(variables map[string]string, starterProjects []v1alpha2.StarterProject) map[string][]string { - - starterProjectsWarningMap := make(map[string][]string) - - for i := range starterProjects { - var err error - - invalidKeys := make(map[string]bool) - - // Validate starter project description - if starterProjects[i].Description, err = validateAndReplaceDataWithVariable(starterProjects[i].Description, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate starter project sub dir - if starterProjects[i].SubDir, err = validateAndReplaceDataWithVariable(starterProjects[i].SubDir, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // Validate starter project source - if err = validateandReplaceForProjectSource(variables, &starterProjects[i].ProjectSource); err != nil { - checkForInvalidError(invalidKeys, err) - } - - err = newInvalidKeysError(invalidKeys) - if verr, ok := err.(*InvalidKeysError); ok { - starterProjectsWarningMap[starterProjects[i].Name] = verr.Keys - } - } - - return starterProjectsWarningMap -} - -// validateandReplaceForProjectSource validates a project source location for global variable references and replaces them with the variable value -func validateandReplaceForProjectSource(variables map[string]string, projectSource *v1alpha2.ProjectSource) error { - - if projectSource == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - switch { - case projectSource.Zip != nil: - if projectSource.Zip.Location, err = validateAndReplaceDataWithVariable(projectSource.Zip.Location, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - case projectSource.Git != nil: - gitProject := &projectSource.Git.GitLikeProjectSource - - if err = validateAndReplaceForGitProjectSource(variables, gitProject); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - return newInvalidKeysError(invalidKeys) -} - -// validateAndReplaceForGitProjectSource validates a project git src for global variable references and replaces them with the variable value -func validateAndReplaceForGitProjectSource(variables map[string]string, gitProject *v1alpha2.GitLikeProjectSource) error { - - if gitProject == nil { - return nil - } - - var err error - invalidKeys := make(map[string]bool) - - if gitProject.CheckoutFrom != nil { - // validate git checkout revision - if gitProject.CheckoutFrom.Revision, err = validateAndReplaceDataWithVariable(gitProject.CheckoutFrom.Revision, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // validate git checkout remote - if gitProject.CheckoutFrom.Remote, err = validateAndReplaceDataWithVariable(gitProject.CheckoutFrom.Remote, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - } - - // validate git remotes - for k := range gitProject.Remotes { - // validate remote map value - if gitProject.Remotes[k], err = validateAndReplaceDataWithVariable(gitProject.Remotes[k], variables); err != nil { - checkForInvalidError(invalidKeys, err) - } - - // validate remote map key - var updatedKey string - if updatedKey, err = validateAndReplaceDataWithVariable(k, variables); err != nil { - checkForInvalidError(invalidKeys, err) - } else if updatedKey != k { - gitProject.Remotes[updatedKey] = gitProject.Remotes[k] - delete(gitProject.Remotes, k) - } - } - - return newInvalidKeysError(invalidKeys) -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/LICENSE
b/ui/wasm/vendor/github.com/devfile/library/v2/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parse.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parse.go deleted file mode 100644 index 1a2b0aa3190..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parse.go +++ /dev/null @@ -1,119 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package devfile - -import ( - "github.com/devfile/api/v2/pkg/validation/variables" - "github.com/devfile/library/v2/pkg/devfile/parser" - "github.com/devfile/library/v2/pkg/devfile/validate" -) - -// ParseFromURLAndValidate func parses the devfile data from the url -// and validates the devfile integrity with the schema -// and validates the devfile data. -// Creates devfile context and runtime objects. -// Deprecated, use ParseDevfileAndValidate() instead -func ParseFromURLAndValidate(url string) (d parser.DevfileObj, err error) { - - // read and parse devfile from the given URL - d, err = parser.ParseFromURL(url) - if err != nil { - return d, err - } - - // generic validation on devfile content - err = validate.ValidateDevfileData(d.Data) - if err != nil { - return d, err - } - - return d, err -} - -// ParseFromDataAndValidate func parses the devfile data -// and validates the devfile integrity with the schema -// and validates the devfile data. -// Creates devfile context and runtime objects. -// Deprecated, use ParseDevfileAndValidate() instead -func ParseFromDataAndValidate(data []byte) (d parser.DevfileObj, err error) { - // read and parse devfile from the given bytes - d, err = parser.ParseFromData(data) - if err != nil { - return d, err - } - // generic validation on devfile content - err = validate.ValidateDevfileData(d.Data) - if err != nil { - return d, err - } - - return d, err -} - -// ParseAndValidate func parses the devfile data -// and validates the devfile integrity with the schema -// and validates the devfile data. -// Creates devfile context and runtime objects. -// Deprecated, use ParseDevfileAndValidate() instead -func ParseAndValidate(path string) (d parser.DevfileObj, err error) { - - // read and parse devfile from given path - d, err = parser.Parse(path) - if err != nil { - return d, err - } - - // generic validation on devfile content - err = validate.ValidateDevfileData(d.Data) - if err != nil { - return d, err - } - - return d, err -} - -// ParseDevfileAndValidate func parses the devfile data, validates the devfile integrity with the schema -// replaces the top-level variable keys if present and validates the devfile data. 
-// It returns devfile context and runtime objects, variable substitution warning if any and an error. -func ParseDevfileAndValidate(args parser.ParserArgs) (d parser.DevfileObj, varWarning variables.VariableWarning, err error) { - d, err = parser.ParseDevfile(args) - if err != nil { - return d, varWarning, err - } - - if d.Data.GetSchemaVersion() != "2.0.0" { - - // add external variables to spec variables - if d.Data.GetDevfileWorkspaceSpec().Variables == nil { - d.Data.GetDevfileWorkspaceSpec().Variables = map[string]string{} - } - allVariables := d.Data.GetDevfileWorkspaceSpec().Variables - for key, val := range args.ExternalVariables { - allVariables[key] = val - } - - // replace the top level variable keys with their values in the devfile - varWarning = variables.ValidateAndReplaceGlobalVariable(d.Data.GetDevfileWorkspaceSpec()) - } - - // generic validation on devfile content - err = validate.ValidateDevfileData(d.Data) - if err != nil { - return d, varWarning, err - } - - return d, varWarning, err -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/configurables.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/configurables.go deleted file mode 100644 index 5d53c482923..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/configurables.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
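For reference while reviewing this removal: ParseDevfileAndValidate, deleted from the vendored copy above, remains available upstream in devfile/library and is the usual entry point into that module. A minimal sketch of a typical call; the PORT variable and the use of the current directory are illustrative assumptions, not taken from this PR:

package main

import (
	"fmt"

	"github.com/devfile/library/v2/pkg/devfile"
	"github.com/devfile/library/v2/pkg/devfile/parser"
)

func main() {
	// Parse and validate ./devfile.yaml, substituting the top-level
	// variable "PORT" (hypothetical) before the generic validation runs.
	d, varWarning, err := devfile.ParseDevfileAndValidate(parser.ParserArgs{
		Path:              ".",
		ExternalVariables: map[string]string{"PORT": "8080"},
	})
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	// varWarning carries the variable keys that could not be resolved.
	fmt.Println(d.Data.GetSchemaVersion(), varWarning)
}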
- -package parser - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" -) - -const ( - Name = "Name" - Ports = "Ports" - Memory = "Memory" - PortsDescription = "Ports to be opened in all component containers" - MemoryDescription = "The Maximum memory all the component containers can consume" - NameDescription = "The name of the component" -) - -// SetMetadataName sets metadata name in a devfile -func (d DevfileObj) SetMetadataName(name string) error { - metadata := d.Data.GetMetadata() - metadata.Name = name - d.Data.SetMetadata(metadata) - return d.WriteYamlDevfile() -} - -// AddEnvVars accepts a map of container name mapped to an array of the env vars to be set; -// it adds the environment variables to a given container name, and writes to the devfile -// Example of containerEnvMap : {"runtime": {{Name: "Foo", Value: "Bar"}}} -func (d DevfileObj) AddEnvVars(containerEnvMap map[string][]v1.EnvVar) error { - err := d.Data.AddEnvVars(containerEnvMap) - if err != nil { - return err - } - return d.WriteYamlDevfile() -} - -// RemoveEnvVars accepts a map of container name mapped to an array of environment variables to be removed; -// it removes the env vars from the specified container name and writes it to the devfile -func (d DevfileObj) RemoveEnvVars(containerEnvMap map[string][]string) (err error) { - err = d.Data.RemoveEnvVars(containerEnvMap) - if err != nil { - return err - } - return d.WriteYamlDevfile() -} - -// SetPorts accepts a map of container name mapped to an array of port numbers to be set; -// it converts ports to endpoints, sets the endpoint to a given container name, and writes to the devfile -// Example of containerPortsMap: {"runtime": {"8080", "9000"}, "wildfly": {"12956"}} -func (d DevfileObj) SetPorts(containerPortsMap map[string][]string) error { - err := d.Data.SetPorts(containerPortsMap) - if err != nil { - return err - } - return d.WriteYamlDevfile() -} - -// RemovePorts accepts a map of container name mapped to an array of port numbers to be removed; -// it removes the container endpoints with the specified port numbers of the specified container, and writes to the devfile -// Example of containerPortsMap: {"runtime": {"8080", "9000"}, "wildfly": {"12956"}} -func (d DevfileObj) RemovePorts(containerPortsMap map[string][]string) error { - err := d.Data.RemovePorts(containerPortsMap) - if err != nil { - return err - } - return d.WriteYamlDevfile() -} - -// HasPorts checks if a devfile contains container endpoints -func (d DevfileObj) HasPorts() bool { - components, err := d.Data.GetComponents(common.DevfileOptions{}) - if err != nil { - return false - } - for _, component := range components { - if component.Container != nil { - if len(component.Container.Endpoints) > 0 { - return true - } - } - } - return false -} - -// SetMemory sets memoryLimit in devfile container -func (d DevfileObj) SetMemory(memory string) error { - components, err := d.Data.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - if component.Container != nil { - component.Container.MemoryLimit = memory - _ = d.Data.UpdateComponent(component) - } - } - return d.WriteYamlDevfile() -} - -// GetMemory gets memoryLimit from devfile container -func (d DevfileObj) GetMemory() string { - components, err := d.Data.GetComponents(common.DevfileOptions{}) - if err != nil { - return "" - } - for _, component := range components { - if component.Container !=
nil { - if component.Container.MemoryLimit != "" { - return component.Container.MemoryLimit - } - } - - } - return "" -} - -// GetMetadataName gets metadata name from a devfile -func (d DevfileObj) GetMetadataName() string { - return d.Data.GetMetadata().Name -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/apiVersion.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/apiVersion.go deleted file mode 100644 index 5cea0ce48d4..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/apiVersion.go +++ /dev/null @@ -1,72 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/devfile/library/v2/pkg/devfile/parser/data" - "github.com/pkg/errors" - "k8s.io/klog" -) - -// SetDevfileAPIVersion sets the devfile APIVersion -func (d *DevfileCtx) SetDevfileAPIVersion() error { - - // Unmarshal JSON into map - var r map[string]interface{} - err := json.Unmarshal(d.rawContent, &r) - if err != nil { - return errors.Wrapf(err, "failed to decode devfile json") - } - - // Get "schemaVersion" value from map for devfile V2 - schemaVersion, okSchema := r["schemaVersion"] - var devfilePath string - if d.GetAbsPath() != "" { - devfilePath = d.GetAbsPath() - } else if d.GetURL() != "" { - devfilePath = d.GetURL() - } - - if okSchema { - // SchemaVersion cannot be empty - if schemaVersion.(string) == "" { - return fmt.Errorf("schemaVersion in devfile: %s cannot be empty", devfilePath) - } - } else { - return fmt.Errorf("schemaVersion not present in devfile: %s", devfilePath) - } - - // Successful - // split by `-` and get the first substring as schema version, schemaVersion without `-` won't get affected - // e.g. 2.2.0-latest => 2.2.0, 2.2.0 => 2.2.0 - d.apiVersion = strings.Split(schemaVersion.(string), "-")[0] - klog.V(4).Infof("devfile schemaVersion: '%s'", d.apiVersion) - return nil -} - -// GetApiVersion returns apiVersion stored in devfile context -func (d *DevfileCtx) GetApiVersion() string { - return d.apiVersion -} - -// IsApiVersionSupported returns true if the apiVersion in DevfileCtx is supported -func (d *DevfileCtx) IsApiVersionSupported() bool { - return data.IsApiVersionSupported(d.apiVersion) -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/content.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/content.go deleted file mode 100644 index 3bab1fa08e1..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/content.go +++ /dev/null @@ -1,105 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "bytes" - "unicode" - - "github.com/devfile/library/v2/pkg/util" - "github.com/pkg/errors" - "k8s.io/klog" - "sigs.k8s.io/yaml" -) - -// Every JSON document starts with "{" -var jsonPrefix = []byte("{") - -// YAMLToJSON converts a single YAML document into a JSON document -// or returns an error. If the document appears to be JSON the -// YAML decoding path is not used. -func YAMLToJSON(data []byte) ([]byte, error) { - - // Is already JSON - if hasJSONPrefix(data) { - return data, nil - } - - // Is YAML, convert to JSON - data, err := yaml.YAMLToJSON(data) - if err != nil { - return data, errors.Wrapf(err, "failed to convert devfile yaml to json") - } - - // Successful - klog.V(4).Infof("converted devfile YAML to JSON") - return data, nil -} - -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - -// hasPrefix returns true if the first non-whitespace bytes in buf is prefix. -func hasPrefix(buf []byte, prefix []byte) bool { - trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) - return bytes.HasPrefix(trim, prefix) -} - -// SetDevfileContent reads devfile and if devfile is in YAML format converts it to JSON -func (d *DevfileCtx) SetDevfileContent() error { - - var err error - var data []byte - if d.url != "" { - // set the client identifier for telemetry - params := util.HTTPRequestParams{URL: d.url, TelemetryClientName: util.TelemetryClientName} - data, err = util.DownloadInMemory(params) - if err != nil { - return errors.Wrap(err, "error getting devfile info from url") - } - } else if d.absPath != "" { - // Read devfile - fs := d.GetFs() - data, err = fs.ReadFile(d.absPath) - if err != nil { - return errors.Wrapf(err, "failed to read devfile from path '%s'", d.absPath) - } - } - - // set devfile content - return d.SetDevfileContentFromBytes(data) -} - -// SetDevfileContentFromBytes sets devfile content from byte input -func (d *DevfileCtx) SetDevfileContentFromBytes(data []byte) error { - // If YAML file convert it to JSON - var err error - d.rawContent, err = YAMLToJSON(data) - if err != nil { - return err - } - - // Successful - return nil -} - -// GetDevfileContent returns the devfile content -func (d *DevfileCtx) GetDevfileContent() []byte { - return d.rawContent -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/context.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/context.go deleted file mode 100644 index bd2e6d6d588..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/context.go +++ /dev/null @@ -1,173 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - - "github.com/devfile/library/v2/pkg/testingutil/filesystem" - "github.com/devfile/library/v2/pkg/util" - "k8s.io/klog" ) - -// DevfileCtx stores context info regarding devfile -type DevfileCtx struct { - - // devfile ApiVersion - apiVersion string - - // absolute path of devfile - absPath string - - // relative path of devfile - relPath string - - // raw content of the devfile - rawContent []byte - - // devfile json schema - jsonSchema string - - // url path of the devfile - url string - - // filesystem for devfile - fs filesystem.Filesystem - - // devfile kubernetes components have been converted from uri to inlined in memory - convertUriToInlined bool -} - -// NewDevfileCtx returns a new DevfileCtx type object -func NewDevfileCtx(path string) DevfileCtx { - return DevfileCtx{ - relPath: path, - fs: filesystem.DefaultFs{}, - } -} - -// NewURLDevfileCtx returns a new DevfileCtx type object -func NewURLDevfileCtx(url string) DevfileCtx { - return DevfileCtx{ - url: url, - } -} - -// NewByteContentDevfileCtx sets devfile content from byte data and returns a new DevfileCtx type object and error -func NewByteContentDevfileCtx(data []byte) (d DevfileCtx, err error) { - err = d.SetDevfileContentFromBytes(data) - if err != nil { - return DevfileCtx{}, err - } - return d, nil -} - -// populateDevfile checks the API version is supported and returns the JSON schema for the given devfile API Version -func (d *DevfileCtx) populateDevfile() (err error) { - - // Get devfile APIVersion - if err := d.SetDevfileAPIVersion(); err != nil { - return err - } - - // Read and save devfile JSON schema for provided apiVersion - return d.SetDevfileJSONSchema() -} - -// Populate fills the DevfileCtx struct with relevant context info -func (d *DevfileCtx) Populate() (err error) { - if !strings.HasSuffix(d.relPath, ".yaml") { - if _, err := os.Stat(filepath.Join(d.relPath, "devfile.yaml")); os.IsNotExist(err) { - if _, err := os.Stat(filepath.Join(d.relPath, ".devfile.yaml")); os.IsNotExist(err) { - return fmt.Errorf("the provided path is not a valid yaml filepath, and devfile.yaml or .devfile.yaml not found in the provided path : %s", d.relPath) - } else { - d.relPath = filepath.Join(d.relPath, ".devfile.yaml") - } - } else { - d.relPath = filepath.Join(d.relPath, "devfile.yaml") - } - } - if err := d.SetAbsPath(); err != nil { - return err - } - klog.V(4).Infof("absolute devfile path: '%s'", d.absPath) - // Read and save devfile content - if err := d.SetDevfileContent(); err != nil { - return err - } - return d.populateDevfile() -} - -// PopulateFromURL fills the DevfileCtx struct with relevant context info -func (d *DevfileCtx) PopulateFromURL() (err error) { - _, err = url.ParseRequestURI(d.url) - if err != nil { - return err - } - // Read and save devfile content - if err := d.SetDevfileContent(); err != nil { - return err - } - return d.populateDevfile() -} - -// PopulateFromRaw fills the DevfileCtx struct with relevant context info -func (d *DevfileCtx) PopulateFromRaw() (err error) { - return
d.populateDevfile() -} - -// Validate func validates devfile JSON schema for the given apiVersion -func (d *DevfileCtx) Validate() error { - - // Validate devfile - return d.ValidateDevfileSchema() -} - -// GetAbsPath func returns current devfile absolute path -func (d *DevfileCtx) GetAbsPath() string { - return d.absPath -} - -// GetURL func returns current devfile absolute URL address -func (d *DevfileCtx) GetURL() string { - return d.url -} - -// SetAbsPath sets absolute file path for devfile -func (d *DevfileCtx) SetAbsPath() (err error) { - // Set devfile absolute path - if d.absPath, err = util.GetAbsPath(d.relPath); err != nil { - return err - } - klog.V(2).Infof("absolute devfile path: '%s'", d.absPath) - - return nil - -} - -// GetConvertUriToInlined func returns if the devfile kubernetes comp has been converted from uri to inlined -func (d *DevfileCtx) GetConvertUriToInlined() bool { - return d.convertUriToInlined -} - -// SetConvertUriToInlined sets if the devfile kubernetes comp has been converted from uri to inlined -func (d *DevfileCtx) SetConvertUriToInlined(value bool) { - d.convertUriToInlined = value -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fakecontext.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fakecontext.go deleted file mode 100644 index 8c286a47c0d..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fakecontext.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import "github.com/devfile/library/v2/pkg/testingutil/filesystem" - -func FakeContext(fs filesystem.Filesystem, absPath string) DevfileCtx { - return DevfileCtx{ - fs: fs, - absPath: absPath, - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fs.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fs.go deleted file mode 100644 index 56f1fde3866..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/fs.go +++ /dev/null @@ -1,23 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
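The devfile lookup order implemented by Populate above is easy to lose in the diff; a standalone restatement of the same resolution rules (a sketch, not the vendored function itself; the helper name is hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// resolveDevfilePath mirrors DevfileCtx.Populate: a path ending in .yaml is
// used as-is; otherwise devfile.yaml, then .devfile.yaml, is looked up in
// the given directory.
func resolveDevfilePath(p string) (string, error) {
	if strings.HasSuffix(p, ".yaml") {
		return p, nil
	}
	for _, name := range []string{"devfile.yaml", ".devfile.yaml"} {
		candidate := filepath.Join(p, name)
		if _, err := os.Stat(candidate); err == nil {
			return candidate, nil
		}
	}
	return "", fmt.Errorf("devfile.yaml or .devfile.yaml not found in the provided path: %s", p)
}

func main() {
	path, err := resolveDevfilePath(".")
	fmt.Println(path, err)
}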
- -package parser - -import "github.com/devfile/library/v2/pkg/testingutil/filesystem" - -// GetFs returns the filesystem object -func (d *DevfileCtx) GetFs() filesystem.Filesystem { - return d.fs -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/schema.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/schema.go deleted file mode 100644 index ca8f37cf6ce..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/context/schema.go +++ /dev/null @@ -1,63 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - - "github.com/devfile/library/v2/pkg/devfile/parser/data" - "github.com/pkg/errors" - "github.com/xeipuuv/gojsonschema" - "k8s.io/klog" -) - -// SetDevfileJSONSchema sets the JSON schema for the given devfile apiVersion -func (d *DevfileCtx) SetDevfileJSONSchema() error { - - // Check if json schema is present for the given apiVersion - jsonSchema, err := data.GetDevfileJSONSchema(d.apiVersion) - if err != nil { - return err - } - d.jsonSchema = jsonSchema - return nil -} - -// ValidateDevfileSchema validates the JSON schema of the provided devfile -func (d *DevfileCtx) ValidateDevfileSchema() error { - var ( - schemaLoader = gojsonschema.NewStringLoader(d.jsonSchema) - documentLoader = gojsonschema.NewStringLoader(string(d.rawContent)) - ) - - // Validate devfile with JSON schema - result, err := gojsonschema.Validate(schemaLoader, documentLoader) - if err != nil { - return errors.Wrapf(err, "failed to validate devfile schema") - } - - if !result.Valid() { - errMsg := "invalid devfile schema. errors :\n" - for _, desc := range result.Errors() { - errMsg = errMsg + fmt.Sprintf("- %s\n", desc) - } - return fmt.Errorf(errMsg) - } - - // Successful - klog.V(4).Info("validated devfile schema") - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/helper.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/helper.go deleted file mode 100644 index 7ce98456396..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/helper.go +++ /dev/null @@ -1,65 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
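ValidateDevfileSchema above is a direct application of the xeipuuv/gojsonschema flow; a self-contained sketch of the same pattern (the inline schema and document strings are toy values, not devfile schemas):

package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

// validateAgainstSchema reproduces the ValidateDevfileSchema pattern:
// load both strings, validate, and aggregate every violation into one error.
func validateAgainstSchema(schemaJSON, docJSON string) error {
	result, err := gojsonschema.Validate(
		gojsonschema.NewStringLoader(schemaJSON),
		gojsonschema.NewStringLoader(docJSON),
	)
	if err != nil {
		return fmt.Errorf("failed to validate devfile schema: %w", err)
	}
	if !result.Valid() {
		msg := "invalid devfile schema. errors :\n"
		for _, desc := range result.Errors() {
			msg += fmt.Sprintf("- %s\n", desc)
		}
		return fmt.Errorf("%s", msg)
	}
	return nil
}

func main() {
	err := validateAgainstSchema(
		`{"type":"object","required":["schemaVersion"]}`,
		`{"schemaVersion":"2.2.0"}`,
	)
	fmt.Println(err) // <nil> when the document satisfies the schema
}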
- -package data - -import ( - "fmt" - "reflect" - "strings" - - "k8s.io/klog" -) - -// String converts supportedApiVersion type to string type -func (s supportedApiVersion) String() string { - return string(s) -} - -// NewDevfileData returns relevant devfile struct for the provided API version -func NewDevfileData(version string) (obj DevfileData, err error) { - - // Fetch devfile struct type from map - devfileType, ok := apiVersionToDevfileStruct[supportedApiVersion(version)] - if !ok { - errMsg := fmt.Sprintf("devfile type not present for apiVersion '%s'", version) - return obj, fmt.Errorf(errMsg) - } - - return reflect.New(devfileType).Interface().(DevfileData), nil -} - -// GetDevfileJSONSchema returns the devfile JSON schema of the supported apiVersion -func GetDevfileJSONSchema(version string) (string, error) { - - // Fetch json schema from the devfileApiVersionToJSONSchema map - schema, ok := devfileApiVersionToJSONSchema[supportedApiVersion(version)] - if !ok { - var supportedVersions []string - for version := range devfileApiVersionToJSONSchema { - supportedVersions = append(supportedVersions, string(version)) - } - return "", fmt.Errorf("unable to find schema for version %q. The parser supports devfile schema for version %s", version, strings.Join(supportedVersions, ", ")) - } - klog.V(4).Infof("devfile apiVersion '%s' is supported", version) - - // Successful - return schema, nil -} - -// IsApiVersionSupported returns true if the API version is supported -func IsApiVersionSupported(version string) bool { - return apiVersionToDevfileStruct[supportedApiVersion(version)] != nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/interface.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/interface.go deleted file mode 100644 index a24a06ed93c..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/interface.go +++ /dev/null @@ -1,106 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
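The reflect-based factory in helper.go, just shown, is how callers obtain a concrete DevfileData for a schema version; a short usage sketch, assuming 2.2.0 is among the supported apiVersions (as elsewhere in this PR):

package main

import (
	"fmt"

	"github.com/devfile/library/v2/pkg/devfile/parser/data"
)

func main() {
	// NewDevfileData instantiates, via reflect.New, the struct type
	// registered for the given apiVersion.
	obj, err := data.NewDevfileData("2.2.0")
	if err != nil {
		fmt.Println(err)
		return
	}
	obj.SetSchemaVersion("2.2.0")
	fmt.Println(obj.GetSchemaVersion())

	// The matching JSON schema string is fetched the same way.
	if _, err := data.GetDevfileJSONSchema("2.2.0"); err != nil {
		fmt.Println(err)
	}
}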
- -package data - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/api/v2/pkg/attributes" - devfilepkg "github.com/devfile/api/v2/pkg/devfile" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" -) - -// Generate mock interfaces for DevfileData by executing the following cmd in pkg/devfile/parser/data -// mockgen -package=data -source=interface.go DevfileData > /tmp/mock_interface.go ; cp /tmp/mock_interface.go ./mock_interface.go - -// DevfileData is an interface that defines functions for Devfile data operations -type DevfileData interface { - - // header related methods - - GetSchemaVersion() string - SetSchemaVersion(version string) - GetMetadata() devfilepkg.DevfileMetadata - SetMetadata(metadata devfilepkg.DevfileMetadata) - - // top-level attributes related method - - GetAttributes() (attributes.Attributes, error) - AddAttributes(key string, value interface{}) error - UpdateAttributes(key string, value interface{}) error - - // parent related methods - - GetParent() *v1.Parent - SetParent(parent *v1.Parent) - - // event related methods - - GetEvents() v1.Events - AddEvents(events v1.Events) error - UpdateEvents(postStart, postStop, preStart, preStop []string) - - // component related methods - - GetComponents(common.DevfileOptions) ([]v1.Component, error) - AddComponents(components []v1.Component) error - UpdateComponent(component v1.Component) error - DeleteComponent(name string) error - - // project related methods - - GetProjects(common.DevfileOptions) ([]v1.Project, error) - AddProjects(projects []v1.Project) error - UpdateProject(project v1.Project) error - DeleteProject(name string) error - - // starter projects related commands - - GetStarterProjects(common.DevfileOptions) ([]v1.StarterProject, error) - AddStarterProjects(projects []v1.StarterProject) error - UpdateStarterProject(project v1.StarterProject) error - DeleteStarterProject(name string) error - - // command related methods - - GetCommands(common.DevfileOptions) ([]v1.Command, error) - AddCommands(commands []v1.Command) error - UpdateCommand(command v1.Command) error - DeleteCommand(id string) error - - // volume mount related methods - - AddVolumeMounts(containerName string, volumeMounts []v1.VolumeMount) error - DeleteVolumeMount(name string) error - GetVolumeMountPaths(mountName, containerName string) ([]string, error) - - // workspace related methods - - GetDevfileWorkspaceSpecContent() *v1.DevWorkspaceTemplateSpecContent - SetDevfileWorkspaceSpecContent(content v1.DevWorkspaceTemplateSpecContent) - GetDevfileWorkspaceSpec() *v1.DevWorkspaceTemplateSpec - SetDevfileWorkspaceSpec(spec v1.DevWorkspaceTemplateSpec) - - // utils - - GetDevfileContainerComponents(common.DevfileOptions) ([]v1.Component, error) - GetDevfileVolumeComponents(common.DevfileOptions) ([]v1.Component, error) - - // containers - RemoveEnvVars(containerEnvMap map[string][]string) error - SetPorts(containerPortsMap map[string][]string) error - AddEnvVars(containerEnvMap map[string][]v1.EnvVar) error - RemovePorts(containerPortsMap map[string][]string) error -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/mock_interface.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/mock_interface.go deleted file mode 100644 index 43836cbaa6a..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/mock_interface.go +++ /dev/null @@ -1,608 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: interface.go - -// Package data is a generated GoMock package. -package data - -import ( - reflect "reflect" - - v1alpha2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - attributes "github.com/devfile/api/v2/pkg/attributes" - devfile "github.com/devfile/api/v2/pkg/devfile" - common "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - gomock "github.com/golang/mock/gomock" -) - -// MockDevfileData is a mock of DevfileData interface. -type MockDevfileData struct { - ctrl *gomock.Controller - recorder *MockDevfileDataMockRecorder -} - -// MockDevfileDataMockRecorder is the mock recorder for MockDevfileData. -type MockDevfileDataMockRecorder struct { - mock *MockDevfileData -} - -// NewMockDevfileData creates a new mock instance. -func NewMockDevfileData(ctrl *gomock.Controller) *MockDevfileData { - mock := &MockDevfileData{ctrl: ctrl} - mock.recorder = &MockDevfileDataMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockDevfileData) EXPECT() *MockDevfileDataMockRecorder { - return m.recorder -} - -// AddAttributes mocks base method. -func (m *MockDevfileData) AddAttributes(key string, value interface{}) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddAttributes", key, value) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddAttributes indicates an expected call of AddAttributes. -func (mr *MockDevfileDataMockRecorder) AddAttributes(key, value interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAttributes", reflect.TypeOf((*MockDevfileData)(nil).AddAttributes), key, value) -} - -// AddCommands mocks base method. -func (m *MockDevfileData) AddCommands(commands []v1alpha2.Command) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddCommands", commands) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddCommands indicates an expected call of AddCommands. -func (mr *MockDevfileDataMockRecorder) AddCommands(commands interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddCommands", reflect.TypeOf((*MockDevfileData)(nil).AddCommands), commands) -} - -// AddComponents mocks base method. -func (m *MockDevfileData) AddComponents(components []v1alpha2.Component) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddComponents", components) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddComponents indicates an expected call of AddComponents. -func (mr *MockDevfileDataMockRecorder) AddComponents(components interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddComponents", reflect.TypeOf((*MockDevfileData)(nil).AddComponents), components) -} - -// AddEvents mocks base method. -func (m *MockDevfileData) AddEvents(events v1alpha2.Events) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddEvents", events) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddEvents indicates an expected call of AddEvents. -func (mr *MockDevfileDataMockRecorder) AddEvents(events interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEvents", reflect.TypeOf((*MockDevfileData)(nil).AddEvents), events) -} - -// AddProjects mocks base method. 
-func (m *MockDevfileData) AddProjects(projects []v1alpha2.Project) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddProjects", projects) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddProjects indicates an expected call of AddProjects. -func (mr *MockDevfileDataMockRecorder) AddProjects(projects interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddProjects", reflect.TypeOf((*MockDevfileData)(nil).AddProjects), projects) -} - -// AddStarterProjects mocks base method. -func (m *MockDevfileData) AddStarterProjects(projects []v1alpha2.StarterProject) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddStarterProjects", projects) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddStarterProjects indicates an expected call of AddStarterProjects. -func (mr *MockDevfileDataMockRecorder) AddStarterProjects(projects interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStarterProjects", reflect.TypeOf((*MockDevfileData)(nil).AddStarterProjects), projects) -} - -// AddVolumeMounts mocks base method. -func (m *MockDevfileData) AddVolumeMounts(containerName string, volumeMounts []v1alpha2.VolumeMount) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddVolumeMounts", containerName, volumeMounts) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddVolumeMounts indicates an expected call of AddVolumeMounts. -func (mr *MockDevfileDataMockRecorder) AddVolumeMounts(containerName, volumeMounts interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddVolumeMounts", reflect.TypeOf((*MockDevfileData)(nil).AddVolumeMounts), containerName, volumeMounts) -} - -// DeleteCommand mocks base method. -func (m *MockDevfileData) DeleteCommand(id string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteCommand", id) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteCommand indicates an expected call of DeleteCommand. -func (mr *MockDevfileDataMockRecorder) DeleteCommand(id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCommand", reflect.TypeOf((*MockDevfileData)(nil).DeleteCommand), id) -} - -// DeleteComponent mocks base method. -func (m *MockDevfileData) DeleteComponent(name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteComponent", name) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteComponent indicates an expected call of DeleteComponent. -func (mr *MockDevfileDataMockRecorder) DeleteComponent(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteComponent", reflect.TypeOf((*MockDevfileData)(nil).DeleteComponent), name) -} - -// DeleteProject mocks base method. -func (m *MockDevfileData) DeleteProject(name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteProject", name) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteProject indicates an expected call of DeleteProject. -func (mr *MockDevfileDataMockRecorder) DeleteProject(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteProject", reflect.TypeOf((*MockDevfileData)(nil).DeleteProject), name) -} - -// DeleteStarterProject mocks base method. 
-func (m *MockDevfileData) DeleteStarterProject(name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteStarterProject", name) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteStarterProject indicates an expected call of DeleteStarterProject. -func (mr *MockDevfileDataMockRecorder) DeleteStarterProject(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteStarterProject", reflect.TypeOf((*MockDevfileData)(nil).DeleteStarterProject), name) -} - -// DeleteVolumeMount mocks base method. -func (m *MockDevfileData) DeleteVolumeMount(name string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteVolumeMount", name) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteVolumeMount indicates an expected call of DeleteVolumeMount. -func (mr *MockDevfileDataMockRecorder) DeleteVolumeMount(name interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolumeMount", reflect.TypeOf((*MockDevfileData)(nil).DeleteVolumeMount), name) -} - -// GetAttributes mocks base method. -func (m *MockDevfileData) GetAttributes() (attributes.Attributes, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAttributes") - ret0, _ := ret[0].(attributes.Attributes) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetAttributes indicates an expected call of GetAttributes. -func (mr *MockDevfileDataMockRecorder) GetAttributes() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttributes", reflect.TypeOf((*MockDevfileData)(nil).GetAttributes)) -} - -// GetCommands mocks base method. -func (m *MockDevfileData) GetCommands(arg0 common.DevfileOptions) ([]v1alpha2.Command, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCommands", arg0) - ret0, _ := ret[0].([]v1alpha2.Command) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetCommands indicates an expected call of GetCommands. -func (mr *MockDevfileDataMockRecorder) GetCommands(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCommands", reflect.TypeOf((*MockDevfileData)(nil).GetCommands), arg0) -} - -// GetComponents mocks base method. -func (m *MockDevfileData) GetComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetComponents", arg0) - ret0, _ := ret[0].([]v1alpha2.Component) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetComponents indicates an expected call of GetComponents. -func (mr *MockDevfileDataMockRecorder) GetComponents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetComponents", reflect.TypeOf((*MockDevfileData)(nil).GetComponents), arg0) -} - -// GetDevfileContainerComponents mocks base method. -func (m *MockDevfileData) GetDevfileContainerComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDevfileContainerComponents", arg0) - ret0, _ := ret[0].([]v1alpha2.Component) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDevfileContainerComponents indicates an expected call of GetDevfileContainerComponents. 
-func (mr *MockDevfileDataMockRecorder) GetDevfileContainerComponents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileContainerComponents", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileContainerComponents), arg0) -} - -// GetDevfileVolumeComponents mocks base method. -func (m *MockDevfileData) GetDevfileVolumeComponents(arg0 common.DevfileOptions) ([]v1alpha2.Component, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDevfileVolumeComponents", arg0) - ret0, _ := ret[0].([]v1alpha2.Component) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDevfileVolumeComponents indicates an expected call of GetDevfileVolumeComponents. -func (mr *MockDevfileDataMockRecorder) GetDevfileVolumeComponents(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileVolumeComponents", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileVolumeComponents), arg0) -} - -// GetDevfileWorkspaceSpec mocks base method. -func (m *MockDevfileData) GetDevfileWorkspaceSpec() *v1alpha2.DevWorkspaceTemplateSpec { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDevfileWorkspaceSpec") - ret0, _ := ret[0].(*v1alpha2.DevWorkspaceTemplateSpec) - return ret0 -} - -// GetDevfileWorkspaceSpec indicates an expected call of GetDevfileWorkspaceSpec. -func (mr *MockDevfileDataMockRecorder) GetDevfileWorkspaceSpec() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileWorkspaceSpec", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileWorkspaceSpec)) -} - -// GetDevfileWorkspaceSpecContent mocks base method. -func (m *MockDevfileData) GetDevfileWorkspaceSpecContent() *v1alpha2.DevWorkspaceTemplateSpecContent { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDevfileWorkspaceSpecContent") - ret0, _ := ret[0].(*v1alpha2.DevWorkspaceTemplateSpecContent) - return ret0 -} - -// GetDevfileWorkspaceSpecContent indicates an expected call of GetDevfileWorkspaceSpecContent. -func (mr *MockDevfileDataMockRecorder) GetDevfileWorkspaceSpecContent() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDevfileWorkspaceSpecContent", reflect.TypeOf((*MockDevfileData)(nil).GetDevfileWorkspaceSpecContent)) -} - -// GetEvents mocks base method. -func (m *MockDevfileData) GetEvents() v1alpha2.Events { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetEvents") - ret0, _ := ret[0].(v1alpha2.Events) - return ret0 -} - -// GetEvents indicates an expected call of GetEvents. -func (mr *MockDevfileDataMockRecorder) GetEvents() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEvents", reflect.TypeOf((*MockDevfileData)(nil).GetEvents)) -} - -// GetMetadata mocks base method. -func (m *MockDevfileData) GetMetadata() devfile.DevfileMetadata { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetMetadata") - ret0, _ := ret[0].(devfile.DevfileMetadata) - return ret0 -} - -// GetMetadata indicates an expected call of GetMetadata. -func (mr *MockDevfileDataMockRecorder) GetMetadata() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMetadata", reflect.TypeOf((*MockDevfileData)(nil).GetMetadata)) -} - -// GetParent mocks base method. 
-func (m *MockDevfileData) GetParent() *v1alpha2.Parent {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetParent")
-	ret0, _ := ret[0].(*v1alpha2.Parent)
-	return ret0
-}
-
-// GetParent indicates an expected call of GetParent.
-func (mr *MockDevfileDataMockRecorder) GetParent() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetParent", reflect.TypeOf((*MockDevfileData)(nil).GetParent))
-}
-
-// GetProjects mocks base method.
-func (m *MockDevfileData) GetProjects(arg0 common.DevfileOptions) ([]v1alpha2.Project, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetProjects", arg0)
-	ret0, _ := ret[0].([]v1alpha2.Project)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetProjects indicates an expected call of GetProjects.
-func (mr *MockDevfileDataMockRecorder) GetProjects(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProjects", reflect.TypeOf((*MockDevfileData)(nil).GetProjects), arg0)
-}
-
-// GetSchemaVersion mocks base method.
-func (m *MockDevfileData) GetSchemaVersion() string {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetSchemaVersion")
-	ret0, _ := ret[0].(string)
-	return ret0
-}
-
-// GetSchemaVersion indicates an expected call of GetSchemaVersion.
-func (mr *MockDevfileDataMockRecorder) GetSchemaVersion() *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSchemaVersion", reflect.TypeOf((*MockDevfileData)(nil).GetSchemaVersion))
-}
-
-// GetStarterProjects mocks base method.
-func (m *MockDevfileData) GetStarterProjects(arg0 common.DevfileOptions) ([]v1alpha2.StarterProject, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetStarterProjects", arg0)
-	ret0, _ := ret[0].([]v1alpha2.StarterProject)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetStarterProjects indicates an expected call of GetStarterProjects.
-func (mr *MockDevfileDataMockRecorder) GetStarterProjects(arg0 interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStarterProjects", reflect.TypeOf((*MockDevfileData)(nil).GetStarterProjects), arg0)
-}
-
-// GetVolumeMountPaths mocks base method.
-func (m *MockDevfileData) GetVolumeMountPaths(mountName, containerName string) ([]string, error) {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "GetVolumeMountPaths", mountName, containerName)
-	ret0, _ := ret[0].([]string)
-	ret1, _ := ret[1].(error)
-	return ret0, ret1
-}
-
-// GetVolumeMountPaths indicates an expected call of GetVolumeMountPaths.
-func (mr *MockDevfileDataMockRecorder) GetVolumeMountPaths(mountName, containerName interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeMountPaths", reflect.TypeOf((*MockDevfileData)(nil).GetVolumeMountPaths), mountName, containerName)
-}
-
-// SetDevfileWorkspaceSpec mocks base method.
-func (m *MockDevfileData) SetDevfileWorkspaceSpec(spec v1alpha2.DevWorkspaceTemplateSpec) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "SetDevfileWorkspaceSpec", spec)
-}
-
-// SetDevfileWorkspaceSpec indicates an expected call of SetDevfileWorkspaceSpec.
-func (mr *MockDevfileDataMockRecorder) SetDevfileWorkspaceSpec(spec interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDevfileWorkspaceSpec", reflect.TypeOf((*MockDevfileData)(nil).SetDevfileWorkspaceSpec), spec)
-}
-
-// SetDevfileWorkspaceSpecContent mocks base method.
-func (m *MockDevfileData) SetDevfileWorkspaceSpecContent(content v1alpha2.DevWorkspaceTemplateSpecContent) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "SetDevfileWorkspaceSpecContent", content)
-}
-
-// SetDevfileWorkspaceSpecContent indicates an expected call of SetDevfileWorkspaceSpecContent.
-func (mr *MockDevfileDataMockRecorder) SetDevfileWorkspaceSpecContent(content interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDevfileWorkspaceSpecContent", reflect.TypeOf((*MockDevfileData)(nil).SetDevfileWorkspaceSpecContent), content)
-}
-
-// SetMetadata mocks base method.
-func (m *MockDevfileData) SetMetadata(metadata devfile.DevfileMetadata) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "SetMetadata", metadata)
-}
-
-// SetMetadata indicates an expected call of SetMetadata.
-func (mr *MockDevfileDataMockRecorder) SetMetadata(metadata interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMetadata", reflect.TypeOf((*MockDevfileData)(nil).SetMetadata), metadata)
-}
-
-// SetParent mocks base method.
-func (m *MockDevfileData) SetParent(parent *v1alpha2.Parent) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "SetParent", parent)
-}
-
-// SetParent indicates an expected call of SetParent.
-func (mr *MockDevfileDataMockRecorder) SetParent(parent interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetParent", reflect.TypeOf((*MockDevfileData)(nil).SetParent), parent)
-}
-
-// SetSchemaVersion mocks base method.
-func (m *MockDevfileData) SetSchemaVersion(version string) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "SetSchemaVersion", version)
-}
-
-// SetSchemaVersion indicates an expected call of SetSchemaVersion.
-func (mr *MockDevfileDataMockRecorder) SetSchemaVersion(version interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetSchemaVersion", reflect.TypeOf((*MockDevfileData)(nil).SetSchemaVersion), version)
-}
-
-// UpdateAttributes mocks base method.
-func (m *MockDevfileData) UpdateAttributes(key string, value interface{}) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateAttributes", key, value)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// UpdateAttributes indicates an expected call of UpdateAttributes.
-func (mr *MockDevfileDataMockRecorder) UpdateAttributes(key, value interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateAttributes", reflect.TypeOf((*MockDevfileData)(nil).UpdateAttributes), key, value)
-}
-
-// UpdateCommand mocks base method.
-func (m *MockDevfileData) UpdateCommand(command v1alpha2.Command) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateCommand", command)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// UpdateCommand indicates an expected call of UpdateCommand.
-func (mr *MockDevfileDataMockRecorder) UpdateCommand(command interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCommand", reflect.TypeOf((*MockDevfileData)(nil).UpdateCommand), command)
-}
-
-// UpdateComponent mocks base method.
-func (m *MockDevfileData) UpdateComponent(component v1alpha2.Component) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateComponent", component)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// UpdateComponent indicates an expected call of UpdateComponent.
-func (mr *MockDevfileDataMockRecorder) UpdateComponent(component interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateComponent", reflect.TypeOf((*MockDevfileData)(nil).UpdateComponent), component)
-}
-
-// UpdateEvents mocks base method.
-func (m *MockDevfileData) UpdateEvents(postStart, postStop, preStart, preStop []string) {
-	m.ctrl.T.Helper()
-	m.ctrl.Call(m, "UpdateEvents", postStart, postStop, preStart, preStop)
-}
-
-// UpdateEvents indicates an expected call of UpdateEvents.
-func (mr *MockDevfileDataMockRecorder) UpdateEvents(postStart, postStop, preStart, preStop interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateEvents", reflect.TypeOf((*MockDevfileData)(nil).UpdateEvents), postStart, postStop, preStart, preStop)
-}
-
-// UpdateProject mocks base method.
-func (m *MockDevfileData) UpdateProject(project v1alpha2.Project) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateProject", project)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// UpdateProject indicates an expected call of UpdateProject.
-func (mr *MockDevfileDataMockRecorder) UpdateProject(project interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateProject", reflect.TypeOf((*MockDevfileData)(nil).UpdateProject), project)
-}
-
-// UpdateStarterProject mocks base method.
-func (m *MockDevfileData) UpdateStarterProject(project v1alpha2.StarterProject) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateStarterProject", project)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// UpdateStarterProject indicates an expected call of UpdateStarterProject.
-func (mr *MockDevfileDataMockRecorder) UpdateStarterProject(project interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateStarterProject", reflect.TypeOf((*MockDevfileData)(nil).UpdateStarterProject), project)
-}
-
-// RemoveEnvVars mocks base method
-func (m *MockDevfileData) RemoveEnvVars(containerEnvMap map[string][]string) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RemoveEnvVars", containerEnvMap)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// RemoveEnvVars indicates an expected call of RemoveEnvVars
-func (mr *MockDevfileDataMockRecorder) RemoveEnvVars(containerEnvMap interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveEnvVars", reflect.TypeOf((*MockDevfileData)(nil).RemoveEnvVars), containerEnvMap)
-}
-
-// SetPorts mocks base method
-func (m *MockDevfileData) SetPorts(containerPortsMap map[string][]string) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "SetPorts", containerPortsMap)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// SetPorts indicates an expected call of SetPorts
-func (mr *MockDevfileDataMockRecorder) SetPorts(containerPortsMap interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPorts", reflect.TypeOf((*MockDevfileData)(nil).SetPorts), containerPortsMap)
-}
-
-// AddEnvVars mocks base method
-func (m *MockDevfileData) AddEnvVars(containerEnvMap map[string][]v1alpha2.EnvVar) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "AddEnvVars", containerEnvMap)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// AddEnvVars indicates an expected call of AddEnvVars
-func (mr *MockDevfileDataMockRecorder) AddEnvVars(containerEnvMap interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddEnvVars", reflect.TypeOf((*MockDevfileData)(nil).AddEnvVars), containerEnvMap)
-}
-
-// RemovePorts mocks base method
-func (m *MockDevfileData) RemovePorts(containerPortsMap map[string][]string) error {
-	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "RemovePorts", containerPortsMap)
-	ret0, _ := ret[0].(error)
-	return ret0
-}
-
-// RemovePorts indicates an expected call of RemovePorts
-func (mr *MockDevfileDataMockRecorder) RemovePorts(containerPortsMap interface{}) *gomock.Call {
-	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePorts", reflect.TypeOf((*MockDevfileData)(nil).RemovePorts), containerPortsMap)
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.0.0/devfileJsonSchema200.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.0.0/devfileJsonSchema200.go
deleted file mode 100644
index 431d5fc027f..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.0.0/devfileJsonSchema200.go
+++ /dev/null
@@ -1,3393 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package version200 - -// https://raw.githubusercontent.com/devfile/api/2.0.x/schemas/latest/devfile.json -const JsonSchema200 = `{ - "description": "Devfile describes the structure of a cloud-native workspace and development environment.", - "type": "object", - "title": "Devfile schema - Version 2.0.0", - "required": [ - "schemaVersion" - ], - "properties": { - "commands": { - "description": "Predefined, ready-to-use, workspace-related commands", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "required": [ - "component" - ], - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - 
"type": "object", - "required": [ - "commandLine", - "component" - ], - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "List of the workspace components, such as editor and plugins, user-provided containers, or other types of components", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - }, - { - "required": [ - "plugin" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "required": [ - "image" - ], - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string", - "default": "/projects" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "plugin": { - "description": "Allows importing a plugin.\n\nPlugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. 
They are defined in either YAML files following the devfile syntax, or as 'DevWorkspaceTemplate' Kubernetes Custom Resources", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where 
projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "registryUrl": { - "type": "string" - }, - "uri": { - "description": "Uri of a Devfile yaml file", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "events": { - "description": "Bindings of commands to events. Each command is referred-to by its name.", - "type": "object", - "properties": { - "postStart": { - "description": "IDs of commands that should be executed after the workspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", - "type": "array", - "items": { - "type": "string" - } - }, - "postStop": { - "description": "IDs of commands that should be executed after stopping the workspace.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStart": { - "description": "IDs of commands that should be executed before the workspace start. 
Kubernetes-wise, these commands would typically be executed in init containers of the workspace POD.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStop": { - "description": "IDs of commands that should be executed before stopping the workspace.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "metadata": { - "description": "Optional metadata", - "type": "object", - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Optional devfile description", - "type": "string" - }, - "displayName": { - "description": "Optional devfile display name", - "type": "string" - }, - "globalMemoryLimit": { - "description": "Optional devfile global memory limit", - "type": "string" - }, - "icon": { - "description": "Optional devfile icon", - "type": "string" - }, - "name": { - "description": "Optional devfile name", - "type": "string" - }, - "tags": { - "description": "Optional devfile tags", - "type": "array", - "items": { - "type": "string" - } - }, - "version": { - "description": "Optional semver-compatible version", - "type": "string", - "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - } - }, - "additionalProperties": true - }, - "parent": { - "description": "Parent workspace template", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - 
"additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - }, - { - "required": [ - "plugin" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "plugin": { - "description": "Allows importing a plugin.\n\nPlugins are mainly imported devfiles that contribute components, commands and events as a consistent single unit. They are defined in either YAML files following the devfile syntax, or as 'DevWorkspaceTemplate' Kubernetes Custom Resources", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. 
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "vscodeTask" - ] - }, - { - "required": [ - "vscodeLaunch" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a workspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the workspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at workspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "vscodeLaunch": { - "description": "Command providing the definition of a VsCode launch action", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - }, - "vscodeTask": { - "description": "Command providing the definition of a VsCode Task", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group 
the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "inlined": { - "description": "Inlined content of the VsCode configuration", - "type": "string" - }, - "uri": { - "description": "Location as an absolute of relative URI the VsCode configuration will be fetched from", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring workspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the workspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the workspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main workspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main workspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "registryUrl": { - "type": "string" - }, - "uri": { - "description": "Uri of a Devfile yaml file", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Overrides of projects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "github" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. 
Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "sparseCheckoutDirs": { - "description": "Populate the project sparsely with selected directories.", - "type": "array", - "items": { - "type": "string" - } - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "registryUrl": { - "type": "string" - }, - "starterProjects": { - "description": "Overrides of starterProjects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "github" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. 
Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "uri": { - "description": "Uri of a Devfile yaml file", - "type": "string" - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Projects worked on in the workspace, containing names and sources locations", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "github" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. 
Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "sparseCheckoutDirs": { - "description": "Populate the project sparsely with selected directories.", - "type": "array", - "items": { - "type": "string" - } - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "schemaVersion": { - "description": "Devfile schema version", - "type": "string", - "pattern": "^([2-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - }, - "starterProjects": { - "description": "StarterProjects is a project that can be used as a starting point when bootstrapping new projects", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "github" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. 
Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "github": { - "description": "Project's GitHub source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false -} -` diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go deleted file mode 100644 index c92b7a75584..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.1.0/devfileJsonSchema210.go +++ /dev/null @@ -1,1677 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
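[Editor's aside, not part of the patch: the JsonSchema210 constant removed below embeds the devfile 2.1.0 JSON schema (fetched from the raw.githubusercontent.com URL cited in the file). To orient readers skimming the schema text, here is a minimal hedged sketch of a devfile that would validate against it; the metadata name, image, port, and command line are all illustrative, not taken from this PR.]

    schemaVersion: 2.1.0                # the only field the schema marks required at top level
    metadata:
      name: sample-app                  # hypothetical project name
    components:
      - name: runtime
        container:
          image: registry.access.redhat.com/ubi8/nodejs-16:latest  # illustrative image
          memoryLimit: 512Mi
          endpoints:
            - name: http-3000           # 'name' and 'targetPort' are the required endpoint fields
              targetPort: 3000
    commands:
      - id: run                         # 'id' is the mandatory command identifier
        exec:                           # per this schema, 'exec' requires 'commandLine' and 'component'
          component: runtime
          commandLine: npm start
          workingDir: ${PROJECT_SOURCE} # special variable described in the schema's 'workingDir' docs
          group:
            kind: run                   # 'kind' is required whenever a group is set
            isDefault: true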
- -package version210 - -// https://raw.githubusercontent.com/devfile/api/2.1.x/schemas/latest/devfile.json -const JsonSchema210 = `{ - "description": "Devfile describes the structure of a cloud-native devworkspace and development environment.", - "type": "object", - "title": "Devfile schema - Version 2.1.0", - "required": [ - "schemaVersion" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "commands": { - "description": "Predefined, ready-to-use, devworkspace-related commands", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", - "type": "object", - "required": [ - "component" - ], - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "required": [ 
- "commandLine", - "component" - ], - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "List of the devworkspace components, such as editor and plugins, user-provided containers, or other types of components", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring devworkspace-related containers", - "type": "object", - "required": [ - "image" - ], - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string", - "default": "/projects" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
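A container component exercising the fields above (required `image`, plus endpoints, env, and volume mounts) might look like this minimal sketch; the image reference and all names are placeholders:

```yaml
components:
  - name: runtime
    container:
      image: registry.access.redhat.com/ubi8/nodejs-16:latest  # placeholder image
      memoryLimit: 1Gi
      mountSources: true          # project sources mounted at `sourceMapping`
      sourceMapping: /projects    # the schema default
      env:
        - name: DEBUG_PORT
          value: "5858"
      endpoints:
        - name: http-node
          targetPort: 3000
          exposure: public        # schema default
          protocol: http          # schema default
      volumeMounts:
        - name: npm-cache         # must match the name of a `volume` component
          path: /home/user/.npm
```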
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "ephemeral": { - "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", - "type": "boolean" - }, - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "events": { - "description": "Bindings of commands to events. 
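Both the `kubernetes` and `openshift` components above take exactly one of `uri` or `inlined` as the manifest source. A sketch referencing an external manifest, with an assumed path and port:

```yaml
components:
  - name: outerloop-deploy
    kubernetes:
      uri: kubernetes/deploy.yaml   # alternatively `inlined:` with the manifest as a string
      endpoints:
        - name: http-8080
          targetPort: 8080
```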
Each command is referred-to by its name.", - "type": "object", - "properties": { - "postStart": { - "description": "IDs of commands that should be executed after the devworkspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", - "type": "array", - "items": { - "type": "string" - } - }, - "postStop": { - "description": "IDs of commands that should be executed after stopping the devworkspace.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStart": { - "description": "IDs of commands that should be executed before the devworkspace start. Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStop": { - "description": "IDs of commands that should be executed before stopping the devworkspace.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "metadata": { - "description": "Optional metadata", - "type": "object", - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes. Deprecated, use the top-level attributes field instead.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Optional devfile description", - "type": "string" - }, - "displayName": { - "description": "Optional devfile display name", - "type": "string" - }, - "globalMemoryLimit": { - "description": "Optional devfile global memory limit", - "type": "string" - }, - "icon": { - "description": "Optional devfile icon, can be a URI or a relative path in the project", - "type": "string" - }, - "language": { - "description": "Optional devfile language", - "type": "string" - }, - "name": { - "description": "Optional devfile name", - "type": "string" - }, - "projectType": { - "description": "Optional devfile project type", - "type": "string" - }, - "tags": { - "description": "Optional devfile tags", - "type": "array", - "items": { - "type": "string" - } - }, - "version": { - "description": "Optional semver-compatible version", - "type": "string", - "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - }, - "website": { - "description": "Optional devfile website", - "type": "string" - } - }, - "additionalProperties": true - }, - "parent": { - "description": "Parent devworkspace template", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "attributes": { - "description": "Overrides of attributes encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "object", - "additionalProperties": true - }, - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. 
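The `events` object above binds command ids to lifecycle phases; per the descriptions, `preStart` commands would typically run as init containers and `postStart` commands once the devworkspace is up. A sketch where the command ids are hypothetical and would have to exist under `commands`:

```yaml
events:
  preStart:
    - init-db        # executed before the devworkspace starts (init container)
  postStart:
    - install-deps   # executed after the devworkspace is completely started
  preStop:
    - cleanup        # executed before the devworkspace stops
```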
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring devworkspace-related containers", - "type": "object", - "properties": { - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. 
For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "ephemeral": { - "description": "Ephemeral volumes are not stored persistently across restarts. 
Defaults to false", - "type": "boolean" - }, - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Overrides of projects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "registryUrl": { - "description": "Registry URL to pull the parent devfile from when using id in the parent reference. To ensure the parent devfile gets resolved consistently in different environments, it is recommended to always specify the 'regsitryURL' when 'Id' is used.", - "type": "string" - }, - "starterProjects": { - "description": "Overrides of starterProjects encapsulated in a parent devfile. 
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "uri": { - "description": "URI Reference of a parent devfile YAML file. It can be a full URL or a relative URI with the current devfile as the base URI.", - "type": "string" - }, - "variables": { - "description": "Overrides of variables encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Projects worked on in the devworkspace, containing names and sources locations", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. 
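A `git`-sourced project satisfying the rules above (`remotes` required with at least one entry, `checkoutFrom` needed only when several remotes are configured) might be sketched as follows; the URL and names are placeholders:

```yaml
projects:
  - name: my-app
    clonePath: webapp    # unix-style path relative to the projects root; defaults to the project name
    git:
      remotes:
        origin: https://github.com/example/my-app.git  # at least one remote is mandatory
      checkoutFrom:
        remote: origin   # required only when more than one remote is configured
        revision: main   # branch name, tag, or commit id
```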
Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "schemaVersion": { - "description": "Devfile schema version", - "type": "string", - "pattern": "^([2-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - }, - "starterProjects": { - "description": "StarterProjects is a project that can be used as a starting point when bootstrapping new projects", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Must have at least one remote configured", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. 
file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "variables": { - "description": "Map of key-value variables used for string replacement in the devfile. Values can can be referenced via {{variable-key}} to replace the corresponding value in string fields in the devfile. Replacement cannot be used for\n\n - schemaVersion, metadata, parent source - element identifiers, e.g. command id, component name, endpoint name, project name - references to identifiers, e.g. in events, a command's component, container's volume mount name - string enums, e.g. command group kind, endpoint exposure", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false -} -` diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go deleted file mode 100644 index f485c74477f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.2.0/devfileJsonSchema220.go +++ /dev/null @@ -1,2067 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package version220 - -// https://raw.githubusercontent.com/devfile/api/main/schemas/latest/devfile.json -const JsonSchema220 = `{ - "description": "Devfile describes the structure of a cloud-native devworkspace and development environment.", - "type": "object", - "title": "Devfile schema - Version 2.2.0-alpha", - "required": [ - "schemaVersion" - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "commands": { - "description": "Predefined, ready-to-use, devworkspace-related commands", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default, unless 'deployByDefault' for that component is set to false.", - "type": "object", - "required": [ - "component" - ], - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI 
Command executed in an existing component container", - "type": "object", - "required": [ - "commandLine", - "component" - ], - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "required": [ - "kind" - ], - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
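Note that this 2.2.0 schema adds `deploy` to the command group kinds and makes `component` required on `apply` commands. A sketch of an outer-loop deploy command under those rules, with assumed ids and names:

```yaml
commands:
  - id: deploy-app
    apply:
      component: outerloop-deploy   # e.g. a kubernetes, openshift, or image component
      group:
        kind: deploy                # group kind added alongside build/run/test/debug
        isDefault: true
```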
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "List of the devworkspace components, such as editor and plugins, user-provided containers, or other types of components", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - }, - { - "required": [ - "image" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring devworkspace-related containers", - "type": "object", - "required": [ - "image" - ], - "properties": { - "annotation": { - "description": "Annotations that should be added to specific resources for this container", - "type": "object", - "properties": { - "deployment": { - "description": "Annotations to be added to deployment", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "service": { - "description": "Annotations to be added to service", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "args": { - "description": "The arguments to supply to the command running the dockerimage component. 
The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. 
This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string", - "default": "/projects" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "image": { - "description": "Allows specifying the definition of an image for outer loop builds", - "type": "object", - "required": [ - "imageName" - ], - "oneOf": [ - { - "required": [ - "dockerfile" - ] - } - ], - "properties": { - "autoBuild": { - "description": "Defines if the image should be built during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "dockerfile": { - "description": "Allows specifying dockerfile type build", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "devfileRegistry" - ] - }, - { - "required": [ - "git" - ] - } - ], - "properties": { - "args": { - "description": "The arguments to supply to the dockerfile build.", - "type": "array", - "items": { - "type": "string" - } - }, - "buildContext": { - "description": "Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container", - "type": "string" - }, - "devfileRegistry": { - "description": "Dockerfile's Devfile Registry source", - "type": "object", - "required": [ - "id" - ], - "properties": { - "id": { - "description": "Id in a devfile registry that contains a Dockerfile. 
The src in the OCI registry required for the Dockerfile build will be downloaded for building the image.", - "type": "string" - }, - "registryUrl": { - "description": "Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. To ensure the Dockerfile gets resolved consistently in different environments, it is recommended to always specify the 'devfileRegistryUrl' when 'Id' is used.", - "type": "string" - } - }, - "additionalProperties": false - }, - "git": { - "description": "Dockerfile's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "fileLocation": { - "description": "Location of the Dockerfile in the Git repository when using git as Dockerfile src. Defaults to Dockerfile.", - "type": "string" - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "rootRequired": { - "description": "Specify if a privileged builder pod is required.\n\nDefault value is 'false'", - "type": "boolean" - }, - "uri": { - "description": "URI Reference of a Dockerfile. It can be a full URL or a relative URI from the current devfile as the base URI.", - "type": "string" - } - }, - "additionalProperties": false - }, - "imageName": { - "description": "Name of the image for the resulting outerloop build", - "type": "string" - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. 
For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "deployByDefault": { - "description": "Defines if the component should be deployed during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. 
The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "deployByDefault": { - "description": "Defines if the component should be deployed during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name", - "targetPort" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "default": "public", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "default": "http", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "ephemeral": { - "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", - "type": "boolean" - }, - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "events": { - "description": "Bindings of commands to events. Each command is referred-to by its name.", - "type": "object", - "properties": { - "postStart": { - "description": "IDs of commands that should be executed after the devworkspace is completely started. In the case of Che-Theia, these commands should be executed after all plugins and extensions have started, including project cloning. This means that those commands are not triggered until the user opens the IDE in his browser.", - "type": "array", - "items": { - "type": "string" - } - }, - "postStop": { - "description": "IDs of commands that should be executed after stopping the devworkspace.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStart": { - "description": "IDs of commands that should be executed before the devworkspace start. Kubernetes-wise, these commands would typically be executed in init containers of the devworkspace POD.", - "type": "array", - "items": { - "type": "string" - } - }, - "preStop": { - "description": "IDs of commands that should be executed before stopping the devworkspace.", - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "metadata": { - "description": "Optional metadata", - "type": "object", - "properties": { - "architectures": { - "description": "Optional list of processor architectures that the devfile supports, empty list suggests that the devfile can be used on any architecture", - "type": "array", - "uniqueItems": true, - "items": { - "description": "Architecture describes the architecture type", - "type": "string", - "enum": [ - "amd64", - "arm64", - "ppc64le", - "s390x" - ] - } - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes. 
Deprecated, use the top-level attributes field instead.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Optional devfile description", - "type": "string" - }, - "displayName": { - "description": "Optional devfile display name", - "type": "string" - }, - "globalMemoryLimit": { - "description": "Optional devfile global memory limit", - "type": "string" - }, - "icon": { - "description": "Optional devfile icon, can be a URI or a relative path in the project", - "type": "string" - }, - "language": { - "description": "Optional devfile language", - "type": "string" - }, - "name": { - "description": "Optional devfile name", - "type": "string" - }, - "projectType": { - "description": "Optional devfile project type", - "type": "string" - }, - "provider": { - "description": "Optional devfile provider information", - "type": "string" - }, - "supportUrl": { - "description": "Optional link to a page that provides support information", - "type": "string" - }, - "tags": { - "description": "Optional devfile tags", - "type": "array", - "items": { - "type": "string" - } - }, - "version": { - "description": "Optional semver-compatible version", - "type": "string", - "pattern": "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - }, - "website": { - "description": "Optional devfile website", - "type": "string" - } - }, - "additionalProperties": true - }, - "parent": { - "description": "Parent devworkspace template", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "id" - ] - }, - { - "required": [ - "kubernetes" - ] - } - ], - "properties": { - "attributes": { - "description": "Overrides of attributes encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "object", - "additionalProperties": true - }, - "commands": { - "description": "Overrides of commands encapsulated in a parent devfile or a plugin. 
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "id" - ], - "oneOf": [ - { - "required": [ - "exec" - ] - }, - { - "required": [ - "apply" - ] - }, - { - "required": [ - "composite" - ] - } - ], - "properties": { - "apply": { - "description": "Command that consists in applying a given component definition, typically bound to a devworkspace event.\n\nFor example, when an 'apply' command is bound to a 'preStart' event, and references a 'container' component, it will start the container as a K8S initContainer in the devworkspace POD, unless the component has its 'dedicatedPod' field set to 'true'.\n\nWhen no 'apply' command exist for a given component, it is assumed the component will be applied at devworkspace start by default, unless 'deployByDefault' for that component is set to false.", - "type": "object", - "properties": { - "component": { - "description": "Describes component that will be applied", - "type": "string" - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - } - }, - "additionalProperties": false - }, - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "composite": { - "description": "Composite command that allows executing several sub-commands either sequentially or concurrently", - "type": "object", - "properties": { - "commands": { - "description": "The commands that comprise this composite command", - "type": "array", - "items": { - "type": "string" - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "parallel": { - "description": "Indicates if the sub-commands should be executed concurrently", - "type": "boolean" - } - }, - "additionalProperties": false - }, - "exec": { - "description": "CLI Command executed in an existing component container", - "type": "object", - "properties": { - "commandLine": { - "description": "The actual command-line string\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). 
If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - }, - "component": { - "description": "Describes component to which given action relates", - "type": "string" - }, - "env": { - "description": "Optional list of environment variables that have to be set before running the command", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "group": { - "description": "Defines the group this command is part of", - "type": "object", - "properties": { - "isDefault": { - "description": "Identifies the default command for a given group kind", - "type": "boolean" - }, - "kind": { - "description": "Kind of group the command is part of", - "type": "string", - "enum": [ - "build", - "run", - "test", - "debug", - "deploy" - ] - } - }, - "additionalProperties": false - }, - "hotReloadCapable": { - "description": "Whether the command is capable to reload itself when source code changes. If set to 'true' the command won't be restarted and it is expected to handle file changes on its own.\n\nDefault value is 'false'", - "type": "boolean" - }, - "label": { - "description": "Optional label that provides a label for this command to be used in Editor UI menus for example", - "type": "string" - }, - "workingDir": { - "description": "Working directory where the command should be executed\n\nSpecial variables that can be used:\n\n - '$PROJECTS_ROOT': A path where projects sources are mounted as defined by container component's sourceMapping.\n\n - '$PROJECT_SOURCE': A path to a project source ($PROJECTS_ROOT/\u003cproject-name\u003e). If there are multiple projects, this will point to the directory of the first one.", - "type": "string" - } - }, - "additionalProperties": false - }, - "id": { - "description": "Mandatory identifier that allows referencing this command in composite commands, from a parent, or in events.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - } - }, - "additionalProperties": false - } - }, - "components": { - "description": "Overrides of components encapsulated in a parent devfile or a plugin. 
Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "container" - ] - }, - { - "required": [ - "kubernetes" - ] - }, - { - "required": [ - "openshift" - ] - }, - { - "required": [ - "volume" - ] - }, - { - "required": [ - "image" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "container": { - "description": "Allows adding and configuring devworkspace-related containers", - "type": "object", - "properties": { - "annotation": { - "description": "Annotations that should be added to specific resources for this container", - "type": "object", - "properties": { - "deployment": { - "description": "Annotations to be added to deployment", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "service": { - "description": "Annotations to be added to service", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "args": { - "description": "The arguments to supply to the command running the dockerimage component. The arguments are supplied either to the default command provided in the image or to the overridden command.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "command": { - "description": "The command to run in the dockerimage component instead of the default one provided in the image.\n\nDefaults to an empty array, meaning use whatever is defined in the image.", - "type": "array", - "items": { - "type": "string" - } - }, - "cpuLimit": { - "type": "string" - }, - "cpuRequest": { - "type": "string" - }, - "dedicatedPod": { - "description": "Specify if a container should run in its own separated pod, instead of running as part of the main development environment pod.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, 
- "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "env": { - "description": "Environment variables used in this container.\n\nThe following variables are reserved and cannot be overridden via env:\n\n - '$PROJECTS_ROOT'\n\n - '$PROJECT_SOURCE'", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - } - }, - "image": { - "type": "string" - }, - "memoryLimit": { - "type": "string" - }, - "memoryRequest": { - "type": "string" - }, - "mountSources": { - "description": "Toggles whether or not the project source code should be mounted in the component.\n\nDefaults to true for all component types except plugins and components that set 'dedicatedPod' to true.", - "type": "boolean" - }, - "sourceMapping": { - "description": "Optional specification of the path in the container where project sources should be transferred/mounted when 'mountSources' is 'true'. When omitted, the default value of /projects is used.", - "type": "string" - }, - "volumeMounts": { - "description": "List of volumes mounts that should be mounted is this container.", - "type": "array", - "items": { - "description": "Volume that should be mounted to a component container", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "The volume mount name is the name of an existing 'Volume' component. If several containers mount the same volume name then they will reuse the same volume and will be able to access to the same files.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "The path in the component container where the volume should be mounted. 
If not path is mentioned, default path is the is '/\u003cname\u003e'.", - "type": "string" - } - }, - "additionalProperties": false - } - } - }, - "additionalProperties": false - }, - "image": { - "description": "Allows specifying the definition of an image for outer loop builds", - "type": "object", - "oneOf": [ - { - "required": [ - "dockerfile" - ] - }, - { - "required": [ - "autoBuild" - ] - } - ], - "properties": { - "autoBuild": { - "description": "Defines if the image should be built during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "dockerfile": { - "description": "Allows specifying dockerfile type build", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "devfileRegistry" - ] - }, - { - "required": [ - "git" - ] - } - ], - "properties": { - "args": { - "description": "The arguments to supply to the dockerfile build.", - "type": "array", - "items": { - "type": "string" - } - }, - "buildContext": { - "description": "Path of source directory to establish build context. Defaults to ${PROJECT_SOURCE} in the container", - "type": "string" - }, - "devfileRegistry": { - "description": "Dockerfile's Devfile Registry source", - "type": "object", - "properties": { - "id": { - "description": "Id in a devfile registry that contains a Dockerfile. The src in the OCI registry required for the Dockerfile build will be downloaded for building the image.", - "type": "string" - }, - "registryUrl": { - "description": "Devfile Registry URL to pull the Dockerfile from when using the Devfile Registry as Dockerfile src. To ensure the Dockerfile gets resolved consistently in different environments, it is recommended to always specify the 'devfileRegistryUrl' when 'Id' is used.", - "type": "string" - } - }, - "additionalProperties": false - }, - "git": { - "description": "Dockerfile's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "fileLocation": { - "description": "Location of the Dockerfile in the Git repository when using git as Dockerfile src. Defaults to Dockerfile.", - "type": "string" - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "rootRequired": { - "description": "Specify if a privileged builder pod is required.\n\nDefault value is 'false'", - "type": "boolean" - }, - "uri": { - "description": "URI Reference of a Dockerfile. 
It can be a full URL or a relative URI from the current devfile as the base URI.", - "type": "string" - } - }, - "additionalProperties": false - }, - "imageName": { - "description": "Name of the image for the resulting outerloop build", - "type": "string" - } - }, - "additionalProperties": false - }, - "kubernetes": { - "description": "Allows importing into the devworkspace the Kubernetes resources defined in a given manifest. For example this allows reusing the Kubernetes definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "deployByDefault": { - "description": "Defines if the component should be deployed during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. 
The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "name": { - "description": "Mandatory name that allows referencing the component from other elements (such as commands) or from an external devfile that may reference this component through a parent or a plugin.", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "openshift": { - "description": "Allows importing into the devworkspace the OpenShift resources defined in a given manifest. For example this allows reusing the OpenShift definitions used to deploy some runtime components in production.", - "type": "object", - "oneOf": [ - { - "required": [ - "uri" - ] - }, - { - "required": [ - "inlined" - ] - } - ], - "properties": { - "deployByDefault": { - "description": "Defines if the component should be deployed during startup.\n\nDefault value is 'false'", - "type": "boolean" - }, - "endpoints": { - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "properties": { - "annotation": { - "description": "Annotations to be added to Kubernetes Ingress or Openshift Route", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "attributes": { - "description": "Map of implementation-dependant string-based free-form attributes.\n\nExamples of Che-specific attributes:\n- cookiesAuthEnabled: \"true\" / \"false\",\n- type: \"terminal\" / \"ide\" / \"ide-dev\",", - "type": "object", - "additionalProperties": true - }, - "exposure": { - "description": "Describes how the endpoint should be exposed on the network.\n- 'public' means that the endpoint will be exposed on the public network, typically through a K8S ingress or an OpenShift route.\n- 'internal' means that the endpoint will be exposed internally outside of the main devworkspace POD, typically by K8S services, to be consumed by other elements running on the same cloud internal network.\n- 'none' means that the endpoint will not be exposed and will only be accessible inside the main devworkspace POD, on a local address.\n\nDefault value is 'public'", - "type": "string", - "enum": [ - "public", - "internal", - "none" - ] - }, - "name": { - "type": "string", - "maxLength": 15, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "path": { - "description": "Path of the endpoint URL", - "type": "string" - }, - "protocol": { - "description": "Describes the application and transport protocols of the traffic that will go through this endpoint.\n- 'http': Endpoint will have 'http' traffic, typically on a TCP connection. It will be automaticaly promoted to 'https' when the 'secure' field is set to 'true'.\n- 'https': Endpoint will have 'https' traffic, typically on a TCP connection.\n- 'ws': Endpoint will have 'ws' traffic, typically on a TCP connection. 
It will be automaticaly promoted to 'wss' when the 'secure' field is set to 'true'.\n- 'wss': Endpoint will have 'wss' traffic, typically on a TCP connection.\n- 'tcp': Endpoint will have traffic on a TCP connection, without specifying an application protocol.\n- 'udp': Endpoint will have traffic on an UDP connection, without specifying an application protocol.\n\nDefault value is 'http'", - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss", - "tcp", - "udp" - ] - }, - "secure": { - "description": "Describes whether the endpoint should be secured and protected by some authentication process. This requires a protocol of 'https' or 'wss'.", - "type": "boolean" - }, - "targetPort": { - "description": "Port number to be used within the container component. The same port cannot be used by two different container components.", - "type": "integer" - } - }, - "additionalProperties": false - } - }, - "inlined": { - "description": "Inlined manifest", - "type": "string" - }, - "uri": { - "description": "Location in a file fetched from a uri.", - "type": "string" - } - }, - "additionalProperties": false - }, - "volume": { - "description": "Allows specifying the definition of a volume shared by several other components", - "type": "object", - "properties": { - "ephemeral": { - "description": "Ephemeral volumes are not stored persistently across restarts. Defaults to false", - "type": "boolean" - }, - "size": { - "description": "Size of the volume", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "id": { - "description": "Id in a registry that contains a Devfile yaml file", - "type": "string" - }, - "kubernetes": { - "description": "Reference to a Kubernetes CRD of type DevWorkspaceTemplate", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Overrides of projects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. 
Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "registryUrl": { - "description": "Registry URL to pull the parent devfile from when using id in the parent reference. To ensure the parent devfile gets resolved consistently in different environments, it is recommended to always specify the 'registryUrl' when 'id' is used.", - "type": "string" - }, - "starterProjects": { - "description": "Overrides of starterProjects encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. 
file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "uri": { - "description": "URI Reference of a parent devfile YAML file. It can be a full URL or a relative URI with the current devfile as the base URI.", - "type": "string" - }, - "variables": { - "description": "Overrides of variables encapsulated in a parent devfile. Overriding is done according to K8S strategic merge patch standard rules.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "version": { - "description": "Specific stack/sample version to pull the parent devfile from, when using id in the parent reference. To specify 'version', 'id' must be defined and used as the import reference source. 'version' can be either a specific stack version, or 'latest'. If no 'version' specified, default version will be used.", - "type": "string", - "pattern": "^(latest)|(([1-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?)$" - } - }, - "additionalProperties": false - }, - "projects": { - "description": "Projects worked on in the devworkspace, containing names and sources locations", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "clonePath": { - "description": "Path relative to the root of the projects to which this project should be cloned into. This is a unix-style relative path (i.e. uses forward slashes). The path is invalid if it is absolute or tries to escape the project root through the usage of '..'. If not specified, defaults to the project name.", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. 
file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "schemaVersion": { - "description": "Devfile schema version", - "type": "string", - "pattern": "^([2-9])\\.([0-9]+)\\.([0-9]+)(\\-[0-9a-z-]+(\\.[0-9a-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" - }, - "starterProjects": { - "description": "StarterProjects is a project that can be used as a starting point when bootstrapping new projects", - "type": "array", - "items": { - "type": "object", - "required": [ - "name" - ], - "oneOf": [ - { - "required": [ - "git" - ] - }, - { - "required": [ - "zip" - ] - } - ], - "properties": { - "attributes": { - "description": "Map of implementation-dependant free-form YAML attributes.", - "type": "object", - "additionalProperties": true - }, - "description": { - "description": "Description of a starter project", - "type": "string" - }, - "git": { - "description": "Project's Git source", - "type": "object", - "required": [ - "remotes" - ], - "properties": { - "checkoutFrom": { - "description": "Defines from what the project should be checked out. Required if there are more than one remote configured", - "type": "object", - "properties": { - "remote": { - "description": "The remote name should be used as init. Required if there are more than one remote configured", - "type": "string" - }, - "revision": { - "description": "The revision to checkout from. Should be branch name, tag or commit id. Default branch is used if missing or specified revision is not found.", - "type": "string" - } - }, - "additionalProperties": false - }, - "remotes": { - "description": "The remotes map which should be initialized in the git project. Projects must have at least one remote configured while StarterProjects \u0026 Image Component's Git source can only have at most one remote configured.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "additionalProperties": false - }, - "name": { - "description": "Project name", - "type": "string", - "maxLength": 63, - "pattern": "^[a-z0-9]([-a-z0-9]*[a-z0-9])?$" - }, - "subDir": { - "description": "Sub-directory from a starter project to be used as root for starter project.", - "type": "string" - }, - "zip": { - "description": "Project's Zip source", - "type": "object", - "properties": { - "location": { - "description": "Zip project's source location address. Should be file path of the archive, e.g. file://$FILE_PATH", - "type": "string" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "variables": { - "description": "Map of key-value variables used for string replacement in the devfile. Values can be referenced via {{variable-key}} to replace the corresponding value in string fields in the devfile. Replacement cannot be used for\n\n - schemaVersion, metadata, parent source\n\n - element identifiers, e.g. command id, component name, endpoint name, project name\n\n - references to identifiers, e.g. in events, a command's component, container's volume mount name\n\n - string enums, e.g. 
command group kind, endpoint exposure",
-      "type": "object",
-      "additionalProperties": {
-        "type": "string"
-      }
-    }
-  },
-  "additionalProperties": false
-}
-`
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/attributes.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/attributes.go
deleted file mode 100644
index 6d7ba88a626..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/attributes.go
+++ /dev/null
@@ -1,67 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//  http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2
-
-import (
-    "fmt"
-
-    "github.com/devfile/api/v2/pkg/attributes"
-)
-
-// GetAttributes gets the devfile top level attributes
-func (d *DevfileV2) GetAttributes() (attributes.Attributes, error) {
-    // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation
-    switch d.SchemaVersion {
-    case "2.0.0":
-        return attributes.Attributes{}, fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0")
-    default:
-        return d.Attributes, nil
-    }
-}
-
-// UpdateAttributes updates the devfile top level attribute for the specific key, err out if key is absent
-func (d *DevfileV2) UpdateAttributes(key string, value interface{}) error {
-    var err error
-
-    // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation
-    switch d.SchemaVersion {
-    case "2.0.0":
-        return fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0")
-    default:
-        if d.Attributes.Exists(key) {
-            d.Attributes.Put(key, value, &err)
-        } else {
-            return fmt.Errorf("cannot update top-level attribute, key %s is not present", key)
-        }
-    }
-
-    return err
-}
-
-// AddAttributes adds to the devfile top level attributes, value will be overwritten if key is already present
-func (d *DevfileV2) AddAttributes(key string, value interface{}) error {
-    var err error
-
-    // This feature was introduced in 2.1.0; so any version 2.1.0 and up should use the 2.1.0 implementation
-    switch d.SchemaVersion {
-    case "2.0.0":
-        return fmt.Errorf("top-level attributes is not supported in devfile schema version 2.0.0")
-    default:
-        d.Attributes.Put(key, value, &err)
-    }
-
-    return err
-}
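All three removed attribute helpers (GetAttributes, UpdateAttributes, AddAttributes) hinge on the same version gate, since top-level attributes only exist from devfile schema 2.1.0 onward. A minimal, self-contained sketch of that gate, with a plain map standing in for the library's attributes.Attributes type (all names below are illustrative, not part of this diff):

    package main

    import (
        "errors"
        "fmt"
    )

    // getAttributes mirrors the version gate of the removed GetAttributes:
    // devfile 2.0.0 predates top-level attributes, so it is a hard error;
    // any later schema version simply returns the stored attributes.
    func getAttributes(schemaVersion string, attrs map[string]any) (map[string]any, error) {
        if schemaVersion == "2.0.0" {
            return nil, errors.New("top-level attributes is not supported in devfile schema version 2.0.0")
        }
        return attrs, nil
    }

    func main() {
        _, err := getAttributes("2.0.0", nil)
        fmt.Println(err) // always an error for 2.0.0
        attrs, _ := getAttributes("2.2.0", map[string]any{"some-key": "some-value"})
        fmt.Println(attrs)
    }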
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/commands.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/commands.go
deleted file mode 100644
index 02e4b57a5e1..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/commands.go
+++ /dev/null
@@ -1,119 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//  http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2
-
-import (
-    "fmt"
-    v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-    "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
-    "reflect"
-    "strings"
-)
-
-// GetCommands returns the slice of Command objects parsed from the Devfile
-func (d *DevfileV2) GetCommands(options common.DevfileOptions) ([]v1.Command, error) {
-
-    if reflect.DeepEqual(options, common.DevfileOptions{}) {
-        return d.Commands, nil
-    }
-
-    var commands []v1.Command
-    for _, command := range d.Commands {
-        // Filter Command Attributes
-        filterIn, err := common.FilterDevfileObject(command.Attributes, options)
-        if err != nil {
-            return nil, err
-        } else if !filterIn {
-            continue
-        }
-
-        // Filter Command Type - Exec, Composite, etc.
-        commandType, err := common.GetCommandType(command)
-        if err != nil {
-            return nil, err
-        }
-        if options.CommandOptions.CommandType != "" && commandType != options.CommandOptions.CommandType {
-            continue
-        }
-
-        // Filter Command Group Kind - Run, Build, etc.
-        commandGroup := common.GetGroup(command)
-        // exclude conditions:
-        // 1. options group is present and command group is present but does not match
-        // 2. options group is present and command group is not present
-        if options.CommandOptions.CommandGroupKind != "" && ((commandGroup != nil && options.CommandOptions.CommandGroupKind != commandGroup.Kind) || commandGroup == nil) {
-            continue
-        }
-
-        if options.FilterByName == "" || command.Id == options.FilterByName {
-            commands = append(commands, command)
-        }
-    }
-
-    return commands, nil
-}
-
-// AddCommands adds the slice of Command objects to the Devfile's commands
-// a command is considered as invalid if it is already defined
-// command list passed in will be all processed, and returns a total error of all invalid commands
-func (d *DevfileV2) AddCommands(commands []v1.Command) error {
-    var errorsList []string
-    for _, command := range commands {
-        var err error
-        for _, devfileCommand := range d.Commands {
-            if command.Id == devfileCommand.Id {
-                err = &common.FieldAlreadyExistError{Name: command.Id, Field: "command"}
-                errorsList = append(errorsList, err.Error())
-                break
-            }
-        }
-        if err == nil {
-            d.Commands = append(d.Commands, command)
-        }
-    }
-    if len(errorsList) > 0 {
-        return fmt.Errorf("errors while adding commands:\n%s", strings.Join(errorsList, "\n"))
-    }
-    return nil
-}
-
-// UpdateCommand updates the command with the given id
-// return an error if the command is not found
-func (d *DevfileV2) UpdateCommand(command v1.Command) error {
-    for i := range d.Commands {
-        if d.Commands[i].Id == command.Id {
-            d.Commands[i] = command
-            return nil
-        }
-    }
-    return fmt.Errorf("update command failed: command %s not found", command.Id)
-}
-
-// DeleteCommand removes the specified command
-func (d *DevfileV2) DeleteCommand(id string) error {
-
-    for i := range d.Commands {
-        if d.Commands[i].Id == id {
-            d.Commands = append(d.Commands[:i], d.Commands[i+1:]...)
-            return nil
-        }
-    }
-
-    return &common.FieldNotFoundError{
-        Field: "command",
-        Name: id,
-    }
-}
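One detail worth noting in the deleted AddCommands: it does not fail fast. Every candidate command is processed, only unseen ids are appended, and duplicates are aggregated into a single error at the end. The same pattern in miniature over plain id strings (a hedged sketch reusing the deleted code's error messages, not the library code itself):

    package main

    import (
        "fmt"
        "strings"
    )

    // addUnique mirrors the removed AddCommands flow: process every
    // candidate, append only unseen ids, and aggregate duplicates into
    // one error instead of stopping at the first conflict.
    func addUnique(existing []string, candidates ...string) ([]string, error) {
        seen := make(map[string]bool, len(existing))
        for _, id := range existing {
            seen[id] = true
        }
        var errs []string
        for _, id := range candidates {
            if seen[id] {
                errs = append(errs, fmt.Sprintf("command %s already exists in devfile", id))
                continue
            }
            seen[id] = true
            existing = append(existing, id)
        }
        if len(errs) > 0 {
            return existing, fmt.Errorf("errors while adding commands:\n%s", strings.Join(errs, "\n"))
        }
        return existing, nil
    }

    func main() {
        ids, err := addUnique([]string{"build"}, "run", "build")
        fmt.Println(ids) // [build run]
        fmt.Println(err) // reports the duplicate "build"
    }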
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/command_helper.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/command_helper.go
deleted file mode 100644
index f4a5cabf311..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/command_helper.go
+++ /dev/null
@@ -1,120 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//  http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import (
-    "fmt"
-
-    v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-)
-
-// GetGroup returns the group the command belongs to
-func GetGroup(dc v1.Command) *v1.CommandGroup {
-    switch {
-    case dc.Composite != nil:
-        return dc.Composite.Group
-    case dc.Exec != nil:
-        return dc.Exec.Group
-    case dc.Apply != nil:
-        return dc.Apply.Group
-    case dc.Custom != nil:
-        return dc.Custom.Group
-
-    default:
-        return nil
-    }
-}
-
-// GetExecComponent returns the component of the exec command
-func GetExecComponent(dc v1.Command) string {
-    if dc.Exec != nil {
-        return dc.Exec.Component
-    }
-
-    return ""
-}
-
-// GetExecCommandLine returns the command line of the exec command
-func GetExecCommandLine(dc v1.Command) string {
-    if dc.Exec != nil {
-        return dc.Exec.CommandLine
-    }
-
-    return ""
-}
-
-// GetExecWorkingDir returns the working dir of the exec command
-func GetExecWorkingDir(dc v1.Command) string {
-    if dc.Exec != nil {
-        return dc.Exec.WorkingDir
-    }
-
-    return ""
-}
-
-// GetApplyComponent returns the component of the apply command
-func GetApplyComponent(dc v1.Command) string {
-    if dc.Apply != nil {
-        return dc.Apply.Component
-    }
-
-    return ""
-}
-
-// GetCommandType returns the command type of a given command
-func GetCommandType(command v1.Command) (v1.CommandType, error) {
-    switch {
-    case command.Apply != nil:
-        return v1.ApplyCommandType, nil
-    case command.Composite != nil:
-        return v1.CompositeCommandType, nil
-    case command.Exec != nil:
-        return v1.ExecCommandType, nil
-    case command.Custom != nil:
-        return v1.CustomCommandType, nil
-
-    default:
-        return "", fmt.Errorf("unknown command type")
-    }
-}
-
-// GetCommandsMap returns a map of the command Id to the command
-func GetCommandsMap(commands []v1.Command) map[string]v1.Command {
-    commandMap := make(map[string]v1.Command, len(commands))
-    for _, command := range commands {
-        commandMap[command.Id] = command
-    }
-    return commandMap
-}
-
-// GetCommandsFromEvent returns the list of commands from the event name.
-// If the event is a composite command, it returns the sub-commands from the tree
-func GetCommandsFromEvent(commandsMap map[string]v1.Command, eventName string) []string {
-    var commands []string
-
-    if command, ok := commandsMap[eventName]; ok {
-        if command.Composite != nil {
-            for _, compositeSubCmd := range command.Composite.Commands {
-                subCommands := GetCommandsFromEvent(commandsMap, compositeSubCmd)
-                commands = append(commands, subCommands...)
-            }
-        } else {
-            commands = append(commands, command.Id)
-        }
-    }
-
-    return commands
-}
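The deleted GetCommandsFromEvent is the one non-trivial helper here: an event may name a composite command, so the tree of sub-commands is flattened recursively down to executable leaf ids. The same recursion in miniature, with a plain map standing in for map[string]v1.Command (illustrative names only):

    package main

    import "fmt"

    // expand flattens a (possibly composite) command id into its leaf
    // command ids, like the removed GetCommandsFromEvent. A missing entry
    // means the id is unknown; an empty slice marks a leaf command.
    func expand(cmds map[string][]string, id string) []string {
        subs, ok := cmds[id]
        if !ok {
            return nil // unknown id contributes nothing
        }
        if len(subs) == 0 {
            return []string{id} // leaf: the command itself runs
        }
        var out []string
        for _, sub := range subs {
            out = append(out, expand(cmds, sub)...)
        }
        return out
    }

    func main() {
        cmds := map[string][]string{
            "install": {},
            "build":   {},
            "setup":   {"install", "build"}, // composite
        }
        fmt.Println(expand(cmds, "setup")) // [install build]
    }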
-// If the event is a composite command, it returns the sub-commands from the tree -func GetCommandsFromEvent(commandsMap map[string]v1.Command, eventName string) []string { - var commands []string - - if command, ok := commandsMap[eventName]; ok { - if command.Composite != nil { - for _, compositeSubCmd := range command.Composite.Commands { - subCommands := GetCommandsFromEvent(commandsMap, compositeSubCmd) - commands = append(commands, subCommands...) - } - } else { - commands = append(commands, command.Id) - } - } - - return commands -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/component_helper.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/component_helper.go deleted file mode 100644 index fdb34371602..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/component_helper.go +++ /dev/null @@ -1,55 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package common - -import ( - "fmt" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// IsContainer checks if the component is a container -func IsContainer(component v1.Component) bool { - return component.Container != nil -} - -// IsVolume checks if the component is a volume -func IsVolume(component v1.Component) bool { - return component.Volume != nil -} - -// GetComponentType returns the component type of a given component -func GetComponentType(component v1.Component) (v1.ComponentType, error) { - switch { - case component.Container != nil: - return v1.ContainerComponentType, nil - case component.Volume != nil: - return v1.VolumeComponentType, nil - case component.Plugin != nil: - return v1.PluginComponentType, nil - case component.Kubernetes != nil: - return v1.KubernetesComponentType, nil - case component.Openshift != nil: - return v1.OpenshiftComponentType, nil - case component.Image != nil: - return v1.ImageComponentType, nil - case component.Custom != nil: - return v1.CustomComponentType, nil - - default: - return "", fmt.Errorf("unknown component type") - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/errors.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/errors.go deleted file mode 100644 index fd192d3d5d9..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import "fmt"
-
-// FieldAlreadyExistError error returned when trying to add an already existing field
-type FieldAlreadyExistError struct {
-	// field which already exists
-	Field string
-	// field name
-	Name string
-}
-
-func (e *FieldAlreadyExistError) Error() string {
-	return fmt.Sprintf("%s %s already exists in devfile", e.Field, e.Name)
-}
-
-// FieldNotFoundError error returned if the field with the name is not found
-type FieldNotFoundError struct {
-	// field which doesn't exist
-	Field string
-	// field name
-	Name string
-}
-
-func (e *FieldNotFoundError) Error() string {
-	return fmt.Sprintf("%s %s is not found in the devfile", e.Field, e.Name)
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/options.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/options.go
deleted file mode 100644
index 4817788e303..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/options.go
+++ /dev/null
@@ -1,84 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package common
-
-import (
-	"reflect"
-
-	v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-	apiAttributes "github.com/devfile/api/v2/pkg/attributes"
-)
-
-// DevfileOptions provides options for Devfile operations
-type DevfileOptions struct {
-	// Filter is a map used to filter devfile objects by their attributes. Values can be strings, floats, booleans or maps
-	Filter map[string]interface{}
-
-	// CommandOptions specifies the various options available to filter commands
-	CommandOptions CommandOptions
-
-	// ComponentOptions specifies the various options available to filter components
-	ComponentOptions ComponentOptions
-
-	// ProjectOptions specifies the various options available to filter projects/starterProjects
-	ProjectOptions ProjectOptions
-
-	// FilterByName specifies the name of the particular devfile object being looked for
-	FilterByName string
-}
-
-// CommandOptions specifies the various options available to filter commands
-type CommandOptions struct {
-	// CommandGroupKind is an option that allows filtering commands based on their kind
-	CommandGroupKind v1.CommandGroupKind
-
-	// CommandType is an option that allows filtering commands based on their type
-	CommandType v1.CommandType
-}
-
-// ComponentOptions specifies the various options available to filter components
-type ComponentOptions struct {
-
-	// ComponentType is an option that allows filtering components based on their type
-	ComponentType v1.ComponentType
-}
-
-// ProjectOptions specifies the various options available to filter projects/starterProjects
-type ProjectOptions struct {
-
-	// ProjectSourceType is an option that allows filtering projects based on their source type
-	ProjectSourceType v1.ProjectSourceType
-}
-
-// FilterDevfileObject filters devfile attributes with the given options
-func FilterDevfileObject(attributes apiAttributes.Attributes, options DevfileOptions) (bool, error) {
-	filterIn := true
-	for key, value := range options.Filter {
-		var err error
-		currentFilterIn := false
-		attrValue := attributes.Get(key, &err)
-		var keyNotFoundErr = &apiAttributes.KeyNotFoundError{Key: key}
-		if err != nil && err.Error() != keyNotFoundErr.Error() {
-			return false, err
-		} else if reflect.DeepEqual(attrValue, value) {
-			currentFilterIn = true
-		}
-
-		filterIn = filterIn && currentFilterIn
-	}
-
-	return filterIn, nil
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/project_helper.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/project_helper.go
deleted file mode 100644
index 13291a878f4..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common/project_helper.go
+++ /dev/null
@@ -1,73 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
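These option structs drive all of the Get* filtering in this vendored parser (GetCommands, GetComponents, GetProjects). A minimal sketch of how a caller might combine them, assuming a parsed DevfileObj named devObj; the attribute key, its value and the variable names are illustrative, not part of this change:

	// Filter for exec commands in the "run" group whose attributes
	// contain tool: odo; an empty FilterByName matches any name.
	options := common.DevfileOptions{
		Filter: map[string]interface{}{
			"tool": "odo",
		},
		CommandOptions: common.CommandOptions{
			CommandType:      v1.ExecCommandType,
			CommandGroupKind: v1.RunCommandGroupKind,
		},
	}
	commands, err := devObj.Data.GetCommands(options)
	if err != nil {
		return err
	}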
- -package common - -import ( - "fmt" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// GetDefaultSource get information about primary source -// returns 3 strings: remote name, remote URL, reference(revision) -func GetDefaultSource(ps v1.GitLikeProjectSource) (remoteName string, remoteURL string, revision string, err error) { - // get git checkout information - // if there are multiple remotes we are ignoring them, as we don't need to setup git repository as it is defined here, - // the only thing that we need is to download the content - - if ps.CheckoutFrom != nil && ps.CheckoutFrom.Revision != "" { - revision = ps.CheckoutFrom.Revision - } - if len(ps.Remotes) > 1 { - if ps.CheckoutFrom == nil { - err = fmt.Errorf("there are multiple git remotes but no checkoutFrom information") - return "", "", "", err - } - remoteName = ps.CheckoutFrom.Remote - if val, ok := ps.Remotes[remoteName]; ok { - remoteURL = val - } else { - err = fmt.Errorf("checkoutFrom.Remote is not defined in Remotes") - return "", "", "", err - - } - } else { - // there is only one remote, using range to get it as there are not indexes - for name, url := range ps.Remotes { - remoteName = name - remoteURL = url - } - - } - - return remoteName, remoteURL, revision, err - -} - -// GetProjectSourceType returns the source type of a given project source -func GetProjectSourceType(projectSrc v1.ProjectSource) (v1.ProjectSourceType, error) { - switch { - case projectSrc.Git != nil: - return v1.GitProjectSourceType, nil - case projectSrc.Zip != nil: - return v1.ZipProjectSourceType, nil - case projectSrc.Custom != nil: - return v1.CustomProjectSourceType, nil - - default: - return "", fmt.Errorf("unknown project source type") - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/components.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/components.go deleted file mode 100644 index 1ea32835ee2..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/components.go +++ /dev/null @@ -1,143 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - "fmt" - "reflect" - "strings" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" -) - -// GetComponents returns the slice of Component objects parsed from the Devfile -func (d *DevfileV2) GetComponents(options common.DevfileOptions) ([]v1.Component, error) { - - if reflect.DeepEqual(options, common.DevfileOptions{}) { - return d.Components, nil - } - - var components []v1.Component - for _, component := range d.Components { - // Filter Component Attributes - filterIn, err := common.FilterDevfileObject(component.Attributes, options) - if err != nil { - return nil, err - } else if !filterIn { - continue - } - - // Filter Component Type - Container, Volume, etc. 
- componentType, err := common.GetComponentType(component) - if err != nil { - return nil, err - } - if options.ComponentOptions.ComponentType != "" && componentType != options.ComponentOptions.ComponentType { - continue - } - - if options.FilterByName == "" || component.Name == options.FilterByName { - components = append(components, component) - } - } - - return components, nil -} - -// GetDevfileContainerComponents iterates through the components in the devfile and returns a list of devfile container components. -// Deprecated, use GetComponents() with the DevfileOptions. -func (d *DevfileV2) GetDevfileContainerComponents(options common.DevfileOptions) ([]v1.Component, error) { - var components []v1.Component - devfileComponents, err := d.GetComponents(options) - if err != nil { - return nil, err - } - for _, comp := range devfileComponents { - if comp.Container != nil { - components = append(components, comp) - } - } - return components, nil -} - -// GetDevfileVolumeComponents iterates through the components in the devfile and returns a list of devfile volume components. -// Deprecated, use GetComponents() with the DevfileOptions. -func (d *DevfileV2) GetDevfileVolumeComponents(options common.DevfileOptions) ([]v1.Component, error) { - var components []v1.Component - devfileComponents, err := d.GetComponents(options) - if err != nil { - return nil, err - } - for _, comp := range devfileComponents { - if comp.Volume != nil { - components = append(components, comp) - } - } - return components, nil -} - -// AddComponents adds the slice of Component objects to the devfile's components -// a component is considered as invalid if it is already defined -// component list passed in will be all processed, and returns a total error of all invalid components -func (d *DevfileV2) AddComponents(components []v1.Component) error { - var errorsList []string - for _, component := range components { - var err error - for _, devfileComponent := range d.Components { - if component.Name == devfileComponent.Name { - err = &common.FieldAlreadyExistError{Name: component.Name, Field: "component"} - errorsList = append(errorsList, err.Error()) - break - } - } - if err == nil { - d.Components = append(d.Components, component) - } - } - if len(errorsList) > 0 { - return fmt.Errorf("errors while adding components:\n%s", strings.Join(errorsList, "\n")) - } - return nil -} - -// UpdateComponent updates the component with the given name -// return an error if the component is not found -func (d *DevfileV2) UpdateComponent(component v1.Component) error { - for i := range d.Components { - if d.Components[i].Name == component.Name { - d.Components[i] = component - return nil - } - } - return fmt.Errorf("update component failed: component %s not found", component.Name) -} - -// DeleteComponent removes the specified component -func (d *DevfileV2) DeleteComponent(name string) error { - - for i := range d.Components { - if d.Components[i].Name == name { - d.Components = append(d.Components[:i], d.Components[i+1:]...) - return nil - } - } - - return &common.FieldNotFoundError{ - Field: "component", - Name: name, - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/containers.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/containers.go deleted file mode 100644 index c62ea195cfd..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/containers.go +++ /dev/null @@ -1,294 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - "fmt" - "strconv" - "strings" - - v1alpha2 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - corev1 "k8s.io/api/core/v1" -) - -// AddEnvVars accepts a map of container name mapped to an array of the env vars to be set; -// it adds the envirnoment variables to a given container name of the DevfileV2 object -// Example of containerEnvMap : {"runtime": {{Name: "Foo", Value: "Bar"}}} -func (d *DevfileV2) AddEnvVars(containerEnvMap map[string][]v1alpha2.EnvVar) error { - components, err := d.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - if component.Container != nil { - component.Container.Env = merge(component.Container.Env, containerEnvMap[component.Name]) - _ = d.UpdateComponent(component) - } - } - return nil -} - -// RemoveEnvVars accepts a map of container name mapped to an array of environment variables to be removed; -// it removes the env vars from the specified container name of the DevfileV2 object -func (d *DevfileV2) RemoveEnvVars(containerEnvMap map[string][]string) error { - components, err := d.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - if component.Container != nil { - component.Container.Env, err = removeEnvVarsFromList(component.Container.Env, containerEnvMap[component.Name]) - if err != nil { - return err - } - _ = d.UpdateComponent(component) - } - } - return nil -} - -// SetPorts accepts a map of container name mapped to an array of port numbers to be set; -// it converts ports to endpoints, sets the endpoint to a given container name of the DevfileV2 object -// Example of containerPortsMap: {"runtime": {"8080", "9000"}, "wildfly": {"12956"}} -func (d *DevfileV2) SetPorts(containerPortsMap map[string][]string) error { - components, err := d.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - endpoints, err := portsToEndpoints(containerPortsMap[component.Name]...) 
- if err != nil { - return err - } - if component.Container != nil { - component.Container.Endpoints = addEndpoints(component.Container.Endpoints, endpoints) - _ = d.UpdateComponent(component) - } - } - - return nil -} - -// RemovePorts accepts a map of container name mapped to an array of port numbers to be removed; -// it removes the container endpoints with the specified port numbers of the specified container of the DevfileV2 object -// Example of containerPortsMap: {"runtime": {"8080", "9000"}, "wildfly": {"12956"}} -func (d *DevfileV2) RemovePorts(containerPortsMap map[string][]string) error { - components, err := d.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - if component.Container != nil { - component.Container.Endpoints, err = removePortsFromList(component.Container.Endpoints, containerPortsMap[component.Name]) - if err != nil { - return err - } - _ = d.UpdateComponent(component) - } - } - - return nil -} - -// removeEnvVarsFromList removes the env variables based on the keys provided -// and returns a new EnvVarList -func removeEnvVarsFromList(envVarList []v1alpha2.EnvVar, keys []string) ([]v1alpha2.EnvVar, error) { - // convert the array of envVarList to a map such that it can easily search for env var(s) - // to remove from the component - envVarListMap := map[string]bool{} - for _, env := range envVarList { - if !envVarListMap[env.Name] { - envVarListMap[env.Name] = true - } - } - - // convert the array of keys to a map so that it can do a fast search for environment variable(s) - // to remove from the component - envVarToBeRemoved := map[string]bool{} - // now check if the environment variable(s) requested for removal exists in - // the env vars currently set in the component - // if an env var requested for removal is not currently set, then raise an error - // else add the env var to the envVarToBeRemoved map - for _, key := range keys { - if !envVarListMap[key] { - return envVarList, fmt.Errorf("unable to find environment variable %s in the component", key) - } - envVarToBeRemoved[key] = true - } - - // finally, let's remove the environment variables(s) requested by the user - newEnvVarList := []v1alpha2.EnvVar{} - for _, envVar := range envVarList { - // if the env is in the keys(env var(s) to be removed), we skip it - if envVarToBeRemoved[envVar.Name] { - continue - } - newEnvVarList = append(newEnvVarList, envVar) - } - return newEnvVarList, nil -} - -// removePortsFromList removes the ports from a given Endpoint list based on the provided port numbers -// and returns a new list of Endpoint -func removePortsFromList(endpoints []v1alpha2.Endpoint, ports []string) ([]v1alpha2.Endpoint, error) { - // convert the array of Endpoint to a map such that it can easily search for port(s) - // to remove from the component - portInEndpoint := map[string]bool{} - for _, ep := range endpoints { - port := strconv.Itoa(ep.TargetPort) - if !portInEndpoint[port] { - portInEndpoint[port] = true - } - } - - // convert the array of ports to a map so that it can do a fast search for port(s) - // to remove from the component - portsToBeRemoved := map[string]bool{} - - // now check if the port(s) requested for removal exists in - // the ports currently present in the component; - // if a port requested for removal is not currently present, then raise an error - // else add the port to the portsToBeRemoved map - for _, port := range ports { - if !portInEndpoint[port] { - return endpoints, fmt.Errorf("unable to find port %q 
in the component", port) - } - portsToBeRemoved[port] = true - } - - // finally, let's remove the port(s) requested by the user - newEndpointsList := []v1alpha2.Endpoint{} - for _, ep := range endpoints { - // if the port is in the port(s)(to be removed), we skip it - if portsToBeRemoved[strconv.Itoa(ep.TargetPort)] { - continue - } - newEndpointsList = append(newEndpointsList, ep) - } - return newEndpointsList, nil -} - -// merge merges the other EnvVarlist with keeping last value for duplicate EnvVars -// and returns a new EnvVarList -func merge(original []v1alpha2.EnvVar, other []v1alpha2.EnvVar) []v1alpha2.EnvVar { - - var dedupNewEvl []v1alpha2.EnvVar - newEvl := append(original, other...) - uniqueMap := make(map[string]string) - // last value will be kept in case of duplicate env vars - for _, envVar := range newEvl { - uniqueMap[envVar.Name] = envVar.Value - } - - for key, value := range uniqueMap { - dedupNewEvl = append(dedupNewEvl, v1alpha2.EnvVar{ - Name: key, - Value: value, - }) - } - - return dedupNewEvl - -} - -// portsToEndpoints converts an array of ports to an array of v1alpha2.Endpoint -func portsToEndpoints(ports ...string) ([]v1alpha2.Endpoint, error) { - var endpoints []v1alpha2.Endpoint - conPorts, err := getContainerPortsFromStrings(ports) - if err != nil { - return nil, err - } - for _, port := range conPorts { - - endpoint := v1alpha2.Endpoint{ - Name: fmt.Sprintf("port-%d-%s", port.ContainerPort, strings.ToLower(string(port.Protocol))), - TargetPort: int(port.ContainerPort), - Protocol: v1alpha2.EndpointProtocol(strings.ToLower(string(port.Protocol))), - } - endpoints = append(endpoints, endpoint) - } - return endpoints, nil - -} - -// addEndpoints appends two arrays of v1alpha2.Endpoint objects -func addEndpoints(current []v1alpha2.Endpoint, other []v1alpha2.Endpoint) []v1alpha2.Endpoint { - newList := make([]v1alpha2.Endpoint, len(current)) - copy(newList, current) - for _, ep := range other { - present := false - - for _, presentep := range newList { - - protocol := presentep.Protocol - if protocol == "" { - // endpoint protocol default value is http - protocol = "http" - } - // if the target port and protocol match, we add a case where the protocol is not provided and hence we assume that to be "tcp" - if presentep.TargetPort == ep.TargetPort && (ep.Protocol == protocol) { - present = true - break - } - } - if !present { - newList = append(newList, ep) - } - } - - return newList -} - -// getContainerPortsFromStrings generates ContainerPort values from the array of string port values -// ports is the array containing the string port values -func getContainerPortsFromStrings(ports []string) ([]corev1.ContainerPort, error) { - var containerPorts []corev1.ContainerPort - for _, port := range ports { - splits := strings.Split(port, "/") - if len(splits) < 1 || len(splits) > 2 { - return nil, fmt.Errorf("unable to parse the port string %s", port) - } - - portNumberI64, err := strconv.ParseInt(splits[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("invalid port number %s", splits[0]) - } - portNumber := int32(portNumberI64) - - var portProto corev1.Protocol - if len(splits) == 2 { - switch strings.ToUpper(splits[1]) { - case "TCP": - portProto = corev1.ProtocolTCP - case "UDP": - portProto = corev1.ProtocolUDP - default: - return nil, fmt.Errorf("invalid port protocol %s", splits[1]) - } - } else { - portProto = corev1.ProtocolTCP - } - - port := corev1.ContainerPort{ - Name: fmt.Sprintf("%d-%s", portNumber, strings.ToLower(string(portProto))), - 
ContainerPort: portNumber, - Protocol: portProto, - } - containerPorts = append(containerPorts, port) - } - return containerPorts, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/events.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/events.go deleted file mode 100644 index d7caf1a80b8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/events.go +++ /dev/null @@ -1,99 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - "fmt" - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - "strings" -) - -// GetEvents returns the Events Object parsed from devfile -func (d *DevfileV2) GetEvents() v1.Events { - if d.Events != nil { - return *d.Events - } - return v1.Events{} -} - -// AddEvents adds the Events Object to the devfile's events -// an event field is considered as invalid if it is already defined -// all event fields will be checked and processed, and returns a total error of all event fields -func (d *DevfileV2) AddEvents(events v1.Events) error { - - if d.Events == nil { - d.Events = &v1.Events{} - } - var errorsList []string - if len(events.PreStop) > 0 { - if len(d.Events.PreStop) > 0 { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "pre stop"}).Error()) - } else { - d.Events.PreStop = events.PreStop - } - } - - if len(events.PreStart) > 0 { - if len(d.Events.PreStart) > 0 { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "pre start"}).Error()) - } else { - d.Events.PreStart = events.PreStart - } - } - - if len(events.PostStop) > 0 { - if len(d.Events.PostStop) > 0 { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "post stop"}).Error()) - } else { - d.Events.PostStop = events.PostStop - } - } - - if len(events.PostStart) > 0 { - if len(d.Events.PostStart) > 0 { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Field: "event field", Name: "post start"}).Error()) - } else { - d.Events.PostStart = events.PostStart - } - } - if len(errorsList) > 0 { - return fmt.Errorf("errors while adding events:\n%s", strings.Join(errorsList, "\n")) - } - return nil -} - -// UpdateEvents updates the devfile's events -// it only updates the events passed to it -func (d *DevfileV2) UpdateEvents(postStart, postStop, preStart, preStop []string) { - - if d.Events == nil { - d.Events = &v1.Events{} - } - - if postStart != nil { - d.Events.PostStart = postStart - } - if postStop != nil { - d.Events.PostStop = postStop - } - if preStart != nil { - d.Events.PreStart = preStart - } - if preStop != nil { - d.Events.PreStop = preStop - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/header.go 
b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/header.go deleted file mode 100644 index 5bc4854b33b..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/header.go +++ /dev/null @@ -1,40 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - devfilepkg "github.com/devfile/api/v2/pkg/devfile" -) - -//GetSchemaVersion gets devfile schema version -func (d *DevfileV2) GetSchemaVersion() string { - return d.SchemaVersion -} - -//SetSchemaVersion sets devfile schema version -func (d *DevfileV2) SetSchemaVersion(version string) { - d.SchemaVersion = version -} - -// GetMetadata returns the DevfileMetadata Object parsed from devfile -func (d *DevfileV2) GetMetadata() devfilepkg.DevfileMetadata { - return d.Metadata -} - -// SetMetadata sets the metadata for devfile -func (d *DevfileV2) SetMetadata(metadata devfilepkg.DevfileMetadata) { - d.Metadata = metadata -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/parent.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/parent.go deleted file mode 100644 index 932f932310b..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/parent.go +++ /dev/null @@ -1,30 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// GetParent returns the Parent object parsed from devfile -func (d *DevfileV2) GetParent() *v1.Parent { - return d.Parent -} - -// SetParent sets the parent for the devfile -func (d *DevfileV2) SetParent(parent *v1.Parent) { - d.Parent = parent -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/projects.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/projects.go deleted file mode 100644 index d5ae17e4387..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/projects.go +++ /dev/null @@ -1,194 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - "fmt" - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - "reflect" - "strings" -) - -// GetProjects returns the Project Object parsed from devfile -func (d *DevfileV2) GetProjects(options common.DevfileOptions) ([]v1.Project, error) { - - if reflect.DeepEqual(options, common.DevfileOptions{}) { - return d.Projects, nil - } - - var projects []v1.Project - for _, project := range d.Projects { - // Filter Project Attributes - filterIn, err := common.FilterDevfileObject(project.Attributes, options) - if err != nil { - return nil, err - } else if !filterIn { - continue - } - - // Filter Project Source Type - Git, Zip, etc. - projectSourceType, err := common.GetProjectSourceType(project.ProjectSource) - if err != nil { - return nil, err - } - if options.ProjectOptions.ProjectSourceType != "" && projectSourceType != options.ProjectOptions.ProjectSourceType { - continue - } - if options.FilterByName == "" || project.Name == options.FilterByName { - projects = append(projects, project) - } - } - - return projects, nil -} - -// AddProjects adss the slice of Devfile projects to the Devfile's project list -// a project is considered as invalid if it is already defined -// project list passed in will be all processed, and returns a total error of all invalid projects -func (d *DevfileV2) AddProjects(projects []v1.Project) error { - projectsMap := make(map[string]bool) - var errorsList []string - for _, project := range d.Projects { - projectsMap[project.Name] = true - } - - for _, project := range projects { - if _, ok := projectsMap[project.Name]; !ok { - d.Projects = append(d.Projects, project) - } else { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Name: project.Name, Field: "project"}).Error()) - continue - } - } - if len(errorsList) > 0 { - return fmt.Errorf("errors while adding projects:\n%s", strings.Join(errorsList, "\n")) - } - return nil -} - -// UpdateProject updates the slice of Devfile projects parsed from the Devfile -// return an error if the project is not found -func (d *DevfileV2) UpdateProject(project v1.Project) error { - for i := range d.Projects { - if d.Projects[i].Name == project.Name { - d.Projects[i] = project - return nil - } - } - return fmt.Errorf("update project failed: project %s not found", project.Name) -} - -// DeleteProject removes the specified project -func (d *DevfileV2) DeleteProject(name string) error { - - for i := range d.Projects { - if d.Projects[i].Name == name { - d.Projects = append(d.Projects[:i], d.Projects[i+1:]...) 
- return nil - } - } - - return &common.FieldNotFoundError{ - Field: "project", - Name: name, - } -} - -//GetStarterProjects returns the DevfileStarterProject parsed from devfile -func (d *DevfileV2) GetStarterProjects(options common.DevfileOptions) ([]v1.StarterProject, error) { - - if reflect.DeepEqual(options, common.DevfileOptions{}) { - return d.StarterProjects, nil - } - - var starterProjects []v1.StarterProject - for _, starterProject := range d.StarterProjects { - // Filter Starter Project Attributes - filterIn, err := common.FilterDevfileObject(starterProject.Attributes, options) - if err != nil { - return nil, err - } else if !filterIn { - continue - } - - // Filter Starter Project Source Type - Git, Zip, etc. - starterProjectSourceType, err := common.GetProjectSourceType(starterProject.ProjectSource) - if err != nil { - return nil, err - } - if options.ProjectOptions.ProjectSourceType != "" && starterProjectSourceType != options.ProjectOptions.ProjectSourceType { - continue - } - - if options.FilterByName == "" || starterProject.Name == options.FilterByName { - starterProjects = append(starterProjects, starterProject) - } - } - - return starterProjects, nil -} - -// AddStarterProjects adds the slice of Devfile starter projects to the Devfile's starter project list -// a starterProject is considered as invalid if it is already defined -// starterProject list passed in will be all processed, and returns a total error of all invalid starterProjects -func (d *DevfileV2) AddStarterProjects(projects []v1.StarterProject) error { - projectsMap := make(map[string]bool) - var errorsList []string - for _, project := range d.StarterProjects { - projectsMap[project.Name] = true - } - - for _, project := range projects { - if _, ok := projectsMap[project.Name]; !ok { - d.StarterProjects = append(d.StarterProjects, project) - } else { - errorsList = append(errorsList, (&common.FieldAlreadyExistError{Name: project.Name, Field: "starterProject"}).Error()) - continue - } - } - if len(errorsList) > 0 { - return fmt.Errorf("errors while adding starterProjects:\n%s", strings.Join(errorsList, "\n")) - } - return nil -} - -// UpdateStarterProject updates the slice of Devfile starter projects parsed from the Devfile -func (d *DevfileV2) UpdateStarterProject(project v1.StarterProject) error { - for i := range d.StarterProjects { - if d.StarterProjects[i].Name == project.Name { - d.StarterProjects[i] = project - return nil - } - } - return fmt.Errorf("update starter project failed: starter project %s not found", project.Name) -} - -// DeleteStarterProject removes the specified starter project -func (d *DevfileV2) DeleteStarterProject(name string) error { - - for i := range d.StarterProjects { - if d.StarterProjects[i].Name == name { - d.StarterProjects = append(d.StarterProjects[:i], d.StarterProjects[i+1:]...) - return nil - } - } - - return &common.FieldNotFoundError{ - Field: "starter project", - Name: name, - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/types.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/types.go deleted file mode 100644 index c35b0d7ed14..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/types.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// DevfileV2 is the devfile go struct from devfile/api -type DevfileV2 struct { - v1.Devfile -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/volumes.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/volumes.go deleted file mode 100644 index 8f3a5d15ef5..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/volumes.go +++ /dev/null @@ -1,117 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package v2 - -import ( - "fmt" - "strings" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" -) - -// AddVolumeMounts adds the volume mounts to the specified container component -func (d *DevfileV2) AddVolumeMounts(containerName string, volumeMounts []v1.VolumeMount) error { - var pathErrorContainers []string - found := false - for _, component := range d.Components { - if component.Container != nil && component.Name == containerName { - found = true - for _, devfileVolumeMount := range component.Container.VolumeMounts { - for _, volumeMount := range volumeMounts { - if devfileVolumeMount.Path == volumeMount.Path { - pathErrorContainers = append(pathErrorContainers, fmt.Sprintf("unable to mount volume %s, as another volume %s is mounted to the same path %s in the container %s", volumeMount.Name, devfileVolumeMount.Name, volumeMount.Path, component.Name)) - } - } - } - if len(pathErrorContainers) == 0 { - component.Container.VolumeMounts = append(component.Container.VolumeMounts, volumeMounts...) 
- } - } - } - - if !found { - return &common.FieldNotFoundError{ - Field: "container component", - Name: containerName, - } - } - - if len(pathErrorContainers) > 0 { - return fmt.Errorf("errors while adding volume mounts:\n%s", strings.Join(pathErrorContainers, "\n")) - } - - return nil -} - -// DeleteVolumeMount deletes the volume mount from container components -func (d *DevfileV2) DeleteVolumeMount(name string) error { - found := false - for i := range d.Components { - if d.Components[i].Container != nil && d.Components[i].Name != name { - // Volume Mounts can have multiple instances of a volume mounted at different paths - // As arrays are rearraged/shifted for deletion, we lose one element every time there is a match - // Looping backward is efficient, otherwise we would have to manually decrement counter - // if we looped forward - for j := len(d.Components[i].Container.VolumeMounts) - 1; j >= 0; j-- { - if d.Components[i].Container.VolumeMounts[j].Name == name { - found = true - d.Components[i].Container.VolumeMounts = append(d.Components[i].Container.VolumeMounts[:j], d.Components[i].Container.VolumeMounts[j+1:]...) - } - } - } - } - - if !found { - return &common.FieldNotFoundError{ - Field: "volume mount", - Name: name, - } - } - - return nil -} - -// GetVolumeMountPaths gets all the mount paths of the specified volume mount from the specified container component. -// A container can mount at different paths for a given volume. -func (d *DevfileV2) GetVolumeMountPaths(mountName, containerName string) ([]string, error) { - componentFound := false - var mountPaths []string - - for _, component := range d.Components { - if component.Container != nil && component.Name == containerName { - componentFound = true - for _, volumeMount := range component.Container.VolumeMounts { - if volumeMount.Name == mountName { - mountPaths = append(mountPaths, volumeMount.Path) - } - } - } - } - - if !componentFound { - return mountPaths, &common.FieldNotFoundError{ - Field: "container component", - Name: containerName, - } - } - - if len(mountPaths) == 0 { - return mountPaths, fmt.Errorf("volume %s not mounted to component %s", mountName, containerName) - } - - return mountPaths, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/workspace.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/workspace.go deleted file mode 100644 index a1793e1a30d..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/v2/workspace.go +++ /dev/null @@ -1,40 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
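The volume-mount helpers above guard against mounting two volumes at the same path and can report every path a given volume is mounted at. A short sketch, assuming a parsed DevfileObj named devObj with a container component named "runtime" (both names illustrative):

	// Mount the "cache" volume into the runtime container, then read
	// back all paths it is mounted at.
	err := devObj.Data.AddVolumeMounts("runtime", []v1.VolumeMount{
		{Name: "cache", Path: "/cache"},
	})
	if err != nil {
		return err
	}
	paths, err := devObj.Data.GetVolumeMountPaths("cache", "runtime")
	if err != nil {
		return err
	}
	fmt.Println(paths) // e.g. [/cache]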
- -package v2 - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -// GetDevfileWorkspaceSpecContent returns the workspace spec content for the devfile -func (d *DevfileV2) GetDevfileWorkspaceSpecContent() *v1.DevWorkspaceTemplateSpecContent { - - return &d.DevWorkspaceTemplateSpecContent -} - -// SetDevfileWorkspaceSpecContent sets the workspace spec content -func (d *DevfileV2) SetDevfileWorkspaceSpecContent(content v1.DevWorkspaceTemplateSpecContent) { - d.DevWorkspaceTemplateSpecContent = content -} - -func (d *DevfileV2) GetDevfileWorkspaceSpec() *v1.DevWorkspaceTemplateSpec { - return &d.DevWorkspaceTemplateSpec -} - -// SetDevfileWorkspaceSpec sets the workspace spec -func (d *DevfileV2) SetDevfileWorkspaceSpec(spec v1.DevWorkspaceTemplateSpec) { - d.DevWorkspaceTemplateSpec = spec -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/versions.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/versions.go deleted file mode 100644 index 0a92dc32f6f..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/data/versions.go +++ /dev/null @@ -1,63 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
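GetDevfileWorkspaceSpecContent above returns the flattened workspace view that parent and plugin resolution merges into (see parse.go further down). A minimal sketch of reading it, with devObj again an illustrative parsed DevfileObj:

	// List the names of all components in the flattened workspace content.
	content := devObj.Data.GetDevfileWorkspaceSpecContent()
	for _, component := range content.Components {
		fmt.Println(component.Name)
	}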
-
-package data
-
-import (
-	"reflect"
-
-	v2 "github.com/devfile/library/v2/pkg/devfile/parser/data/v2"
-	v200 "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.0.0"
-	v210 "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.1.0"
-	v220 "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/2.2.0"
-)
-
-// supportedApiVersion is the type of the supported devfile API versions
-type supportedApiVersion string
-
-// Supported devfile API versions
-const (
-	APISchemaVersion200 supportedApiVersion = "2.0.0"
-	APISchemaVersion210 supportedApiVersion = "2.1.0"
-	APISchemaVersion220 supportedApiVersion = "2.2.0"
-	APIVersionAlpha2    supportedApiVersion = "v1alpha2"
-)
-
-// ------------- Init functions ------------- //
-
-// apiVersionToDevfileStruct maps supported devfile API versions to their corresponding devfile structs
-var apiVersionToDevfileStruct map[supportedApiVersion]reflect.Type
-
-// Initializes a map of supported devfile api versions and devfile structs
-func init() {
-	apiVersionToDevfileStruct = make(map[supportedApiVersion]reflect.Type)
-	apiVersionToDevfileStruct[APISchemaVersion200] = reflect.TypeOf(v2.DevfileV2{})
-	apiVersionToDevfileStruct[APISchemaVersion210] = reflect.TypeOf(v2.DevfileV2{})
-	apiVersionToDevfileStruct[APISchemaVersion220] = reflect.TypeOf(v2.DevfileV2{})
-	apiVersionToDevfileStruct[APIVersionAlpha2] = reflect.TypeOf(v2.DevfileV2{})
-}
-
-// Map to store mappings between supported devfile API versions and respective devfile JSON schemas
-var devfileApiVersionToJSONSchema map[supportedApiVersion]string
-
-// init initializes a map of supported devfile apiVersions with its respective devfile JSON schema
-func init() {
-	devfileApiVersionToJSONSchema = make(map[supportedApiVersion]string)
-	devfileApiVersionToJSONSchema[APISchemaVersion200] = v200.JsonSchema200
-	devfileApiVersionToJSONSchema[APISchemaVersion210] = v210.JsonSchema210
-	devfileApiVersionToJSONSchema[APISchemaVersion220] = v220.JsonSchema220
-	// should use highest v2 schema version since it is expected to be backward compatible with the same api version
-	devfileApiVersionToJSONSchema[APIVersionAlpha2] = v220.JsonSchema220
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/devfileobj.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/devfileobj.go
deleted file mode 100644
index 1043896047b..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/devfileobj.go
+++ /dev/null
@@ -1,37 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
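The two init maps in versions.go above are how the parser selects a devfile struct and a JSON schema for a given schemaVersion. A sketch of the kind of lookup they enable inside package data; lookupSchema is a hypothetical helper, not vendored code:

	// lookupSchema returns the JSON schema registered for a devfile
	// API version string, if that version is supported.
	func lookupSchema(version string) (string, bool) {
		schema, ok := devfileApiVersionToJSONSchema[supportedApiVersion(version)]
		return schema, ok
	}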
- -package parser - -import ( - devfileCtx "github.com/devfile/library/v2/pkg/devfile/parser/context" - "github.com/devfile/library/v2/pkg/devfile/parser/data" -) - -// Default filenames for create devfile -const ( - OutputDevfileYamlPath = "devfile.yaml" - K8sLikeComponentOriginalURIKey = "api.devfile.io/k8sLikeComponent-originalURI" -) - -// DevfileObj is the runtime devfile object -type DevfileObj struct { - - // Ctx has devfile context info - Ctx devfileCtx.DevfileCtx - - // Data has the devfile data - Data data.DevfileData -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/parse.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/parse.go deleted file mode 100644 index 51a2f206bfc..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/parse.go +++ /dev/null @@ -1,757 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "strings" - - "github.com/devfile/api/v2/pkg/attributes" - - "reflect" - - devfileCtx "github.com/devfile/library/v2/pkg/devfile/parser/context" - "github.com/devfile/library/v2/pkg/devfile/parser/data" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - "github.com/devfile/library/v2/pkg/util" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - apiOverride "github.com/devfile/api/v2/pkg/utils/overriding" - "github.com/devfile/api/v2/pkg/validation" - versionpkg "github.com/hashicorp/go-version" - "github.com/pkg/errors" -) - -// ParseDevfile func validates the devfile integrity. -// Creates devfile context and runtime objects -func parseDevfile(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools, flattenedDevfile bool) (DevfileObj, error) { - - // Validate devfile - err := d.Ctx.Validate() - if err != nil { - return d, err - } - - // Create a new devfile data object - d.Data, err = data.NewDevfileData(d.Ctx.GetApiVersion()) - if err != nil { - return d, err - } - - // Unmarshal devfile content into devfile struct - err = json.Unmarshal(d.Ctx.GetDevfileContent(), &d.Data) - if err != nil { - return d, errors.Wrapf(err, "failed to decode devfile content") - } - - if flattenedDevfile { - err = parseParentAndPlugin(d, resolveCtx, tool) - if err != nil { - return DevfileObj{}, err - } - } - - // Successful - return d, nil -} - -// ParserArgs is the struct to pass into parser functions which contains required info for parsing devfile. -// It accepts devfile path, devfile URL or devfile content in []byte format. -type ParserArgs struct { - // Path is a relative or absolute devfile path. - Path string - // URL is the URL address of the specific devfile. - URL string - // Data is the devfile content in []byte format. 
- Data []byte - // FlattenedDevfile defines if the returned devfileObj is flattened content (true) or raw content (false). - // The value is default to be true. - FlattenedDevfile *bool - // ConvertKubernetesContentInUri defines if the kubernetes resources definition from uri will be converted to inlined in devObj(true) or not (false). - // The value is default to be true. - ConvertKubernetesContentInUri *bool - // RegistryURLs is a list of registry hosts which parser should pull parent devfile from. - // If registryUrl is defined in devfile, this list will be ignored. - RegistryURLs []string - // DefaultNamespace is the default namespace to use - // If namespace is defined under devfile's parent kubernetes object, this namespace will be ignored. - DefaultNamespace string - // Context is the context used for making Kubernetes requests - Context context.Context - // K8sClient is the Kubernetes client instance used for interacting with a cluster - K8sClient client.Client - // ExternalVariables override variables defined in the Devfile - ExternalVariables map[string]string - // HTTPTimeout overrides the request and response timeout values for reading a parent devfile reference from the registry. If a negative value is specified, the default timeout will be used. - HTTPTimeout *int -} - -// ParseDevfile func populates the devfile data, parses and validates the devfile integrity. -// Creates devfile context and runtime objects -func ParseDevfile(args ParserArgs) (d DevfileObj, err error) { - if args.Data != nil { - d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(args.Data) - if err != nil { - return d, errors.Wrap(err, "failed to set devfile content from bytes") - } - } else if args.Path != "" { - d.Ctx = devfileCtx.NewDevfileCtx(args.Path) - } else if args.URL != "" { - d.Ctx = devfileCtx.NewURLDevfileCtx(args.URL) - } else { - return d, errors.Wrap(err, "the devfile source is not provided") - } - - tool := resolverTools{ - defaultNamespace: args.DefaultNamespace, - registryURLs: args.RegistryURLs, - context: args.Context, - k8sClient: args.K8sClient, - httpTimeout: args.HTTPTimeout, - } - - flattenedDevfile := true - if args.FlattenedDevfile != nil { - flattenedDevfile = *args.FlattenedDevfile - } - - d, err = populateAndParseDevfile(d, &resolutionContextTree{}, tool, flattenedDevfile) - if err != nil { - return d, errors.Wrap(err, "failed to populateAndParseDevfile") - } - - //set defaults only if we are flattening parent and parsing succeeded - if flattenedDevfile && err == nil { - err = setDefaults(d) - if err != nil { - return d, errors.Wrap(err, "failed to setDefaults") - } - } - - convertUriToInlined := true - if args.ConvertKubernetesContentInUri != nil { - convertUriToInlined = *args.ConvertKubernetesContentInUri - } - - if convertUriToInlined { - d.Ctx.SetConvertUriToInlined(true) - err = parseKubeResourceFromURI(d) - if err != nil { - return d, errors.Wrapf(err, "failed to parse kubernetes/openshift component from uri to inlined") - } - } - - return d, err -} - -// resolverTools contains required structs and data for resolving remote components of a devfile (plugins and parents) -type resolverTools struct { - // DefaultNamespace is the default namespace to use for resolving Kubernetes ImportReferences that do not include one - defaultNamespace string - // RegistryURLs is a list of registry hosts which parser should pull parent devfile from. - // If registryUrl is defined in devfile, this list will be ignored. 
- registryURLs []string - // Context is the context used for making Kubernetes or HTTP requests - context context.Context - // K8sClient is the Kubernetes client instance used for interacting with a cluster - k8sClient client.Client - // httpTimeout is the timeout value in seconds passed in from the client. - httpTimeout *int -} - -func populateAndParseDevfile(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools, flattenedDevfile bool) (DevfileObj, error) { - var err error - if err = resolveCtx.hasCycle(); err != nil { - return DevfileObj{}, err - } - // Fill the fields of DevfileCtx struct - if d.Ctx.GetURL() != "" { - err = d.Ctx.PopulateFromURL() - } else if d.Ctx.GetDevfileContent() != nil { - err = d.Ctx.PopulateFromRaw() - } else { - err = d.Ctx.Populate() - } - if err != nil { - return d, err - } - - return parseDevfile(d, resolveCtx, tool, flattenedDevfile) -} - -// Parse func populates the flattened devfile data, parses and validates the devfile integrity. -// Creates devfile context and runtime objects -// Deprecated, use ParseDevfile() instead -func Parse(path string) (d DevfileObj, err error) { - - // NewDevfileCtx - d.Ctx = devfileCtx.NewDevfileCtx(path) - return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) -} - -// ParseRawDevfile populates the raw devfile data without overriding and merging -// Deprecated, use ParseDevfile() instead -func ParseRawDevfile(path string) (d DevfileObj, err error) { - // NewDevfileCtx - d.Ctx = devfileCtx.NewDevfileCtx(path) - return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, false) -} - -// ParseFromURL func parses and validates the devfile integrity. -// Creates devfile context and runtime objects -// Deprecated, use ParseDevfile() instead -func ParseFromURL(url string) (d DevfileObj, err error) { - d.Ctx = devfileCtx.NewURLDevfileCtx(url) - return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) -} - -// ParseFromData func parses and validates the devfile integrity. 
-// Creates devfile context and runtime objects -// Deprecated, use ParseDevfile() instead -func ParseFromData(data []byte) (d DevfileObj, err error) { - d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(data) - if err != nil { - return d, errors.Wrap(err, "failed to set devfile content from bytes") - } - return populateAndParseDevfile(d, &resolutionContextTree{}, resolverTools{}, true) -} - -func parseParentAndPlugin(d DevfileObj, resolveCtx *resolutionContextTree, tool resolverTools) (err error) { - flattenedParent := &v1.DevWorkspaceTemplateSpecContent{} - var mainDevfileVersion, parentDevfileVerson, pluginDevfileVerson *versionpkg.Version - var devfileVersion string - if devfileVersion = d.Ctx.GetApiVersion(); devfileVersion == "" { - devfileVersion = d.Data.GetSchemaVersion() - } - - if devfileVersion != "" { - mainDevfileVersion, err = versionpkg.NewVersion(devfileVersion) - if err != nil { - return fmt.Errorf("fail to parse version of the main devfile") - } - } - parent := d.Data.GetParent() - if parent != nil { - if !reflect.DeepEqual(parent, &v1.Parent{}) { - - var parentDevfileObj DevfileObj - switch { - case parent.Uri != "": - parentDevfileObj, err = parseFromURI(parent.ImportReference, d.Ctx, resolveCtx, tool) - case parent.Id != "": - parentDevfileObj, err = parseFromRegistry(parent.ImportReference, resolveCtx, tool) - case parent.Kubernetes != nil: - parentDevfileObj, err = parseFromKubeCRD(parent.ImportReference, resolveCtx, tool) - default: - return fmt.Errorf("devfile parent does not define any resources") - } - if err != nil { - return err - } - var devfileVersion string - if devfileVersion = parentDevfileObj.Ctx.GetApiVersion(); devfileVersion == "" { - devfileVersion = parentDevfileObj.Data.GetSchemaVersion() - } - - if devfileVersion != "" { - parentDevfileVerson, err = versionpkg.NewVersion(devfileVersion) - if err != nil { - return fmt.Errorf("fail to parse version of parent devfile from: %v", resolveImportReference(parent.ImportReference)) - } - if parentDevfileVerson.GreaterThan(mainDevfileVersion) { - return fmt.Errorf("the parent devfile version from %v is greater than the child devfile version from %v", resolveImportReference(parent.ImportReference), resolveImportReference(resolveCtx.importReference)) - } - } - parentWorkspaceContent := parentDevfileObj.Data.GetDevfileWorkspaceSpecContent() - // add attribute to parent elements - err = addSourceAttributesForOverrideAndMerge(parent.ImportReference, parentWorkspaceContent) - if err != nil { - return err - } - if !reflect.DeepEqual(parent.ParentOverrides, v1.ParentOverrides{}) { - // add attribute to parentOverrides elements - curNodeImportReference := resolveCtx.importReference - err = addSourceAttributesForOverrideAndMerge(curNodeImportReference, &parent.ParentOverrides) - if err != nil { - return err - } - flattenedParent, err = apiOverride.OverrideDevWorkspaceTemplateSpec(parentWorkspaceContent, parent.ParentOverrides) - if err != nil { - return err - } - } else { - flattenedParent = parentWorkspaceContent - } - - klog.V(4).Infof("adding data of devfile with URI: %v", parent.Uri) - } - } - - flattenedPlugins := []*v1.DevWorkspaceTemplateSpecContent{} - components, err := d.Data.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - for _, component := range components { - if component.Plugin != nil && !reflect.DeepEqual(component.Plugin, &v1.PluginComponent{}) { - plugin := component.Plugin - var pluginDevfileObj DevfileObj - switch { - case plugin.Uri != "": - pluginDevfileObj, err = 
parseFromURI(plugin.ImportReference, d.Ctx, resolveCtx, tool) - case plugin.Id != "": - pluginDevfileObj, err = parseFromRegistry(plugin.ImportReference, resolveCtx, tool) - case plugin.Kubernetes != nil: - pluginDevfileObj, err = parseFromKubeCRD(plugin.ImportReference, resolveCtx, tool) - default: - return fmt.Errorf("plugin %s does not define any resources", component.Name) - } - if err != nil { - return err - } - var devfileVersion string - if devfileVersion = pluginDevfileObj.Ctx.GetApiVersion(); devfileVersion == "" { - devfileVersion = pluginDevfileObj.Data.GetSchemaVersion() - } - - if devfileVersion != "" { - pluginDevfileVerson, err = versionpkg.NewVersion(devfileVersion) - if err != nil { - return fmt.Errorf("fail to parse version of plugin devfile from: %v", resolveImportReference(component.Plugin.ImportReference)) - } - if pluginDevfileVerson.GreaterThan(mainDevfileVersion) { - return fmt.Errorf("the plugin devfile version from %v is greater than the child devfile version from %v", resolveImportReference(component.Plugin.ImportReference), resolveImportReference(resolveCtx.importReference)) - } - } - pluginWorkspaceContent := pluginDevfileObj.Data.GetDevfileWorkspaceSpecContent() - // add attribute to plugin elements - err = addSourceAttributesForOverrideAndMerge(plugin.ImportReference, pluginWorkspaceContent) - if err != nil { - return err - } - flattenedPlugin := pluginWorkspaceContent - if !reflect.DeepEqual(plugin.PluginOverrides, v1.PluginOverrides{}) { - // add attribute to pluginOverrides elements - curNodeImportReference := resolveCtx.importReference - err = addSourceAttributesForOverrideAndMerge(curNodeImportReference, &plugin.PluginOverrides) - if err != nil { - return err - } - flattenedPlugin, err = apiOverride.OverrideDevWorkspaceTemplateSpec(pluginWorkspaceContent, plugin.PluginOverrides) - if err != nil { - return err - } - } - flattenedPlugins = append(flattenedPlugins, flattenedPlugin) - } - } - - mergedContent, err := apiOverride.MergeDevWorkspaceTemplateSpec(d.Data.GetDevfileWorkspaceSpecContent(), flattenedParent, flattenedPlugins...) 
- if err != nil { - return err - } - d.Data.SetDevfileWorkspaceSpecContent(*mergedContent) - // remove parent from flatterned devfile - d.Data.SetParent(nil) - - return nil -} - -func parseFromURI(importReference v1.ImportReference, curDevfileCtx devfileCtx.DevfileCtx, resolveCtx *resolutionContextTree, tool resolverTools) (DevfileObj, error) { - uri := importReference.Uri - // validate URI - err := validation.ValidateURI(uri) - if err != nil { - return DevfileObj{}, err - } - // NewDevfileCtx - var d DevfileObj - absoluteURL := strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") - var newUri string - - // relative path on disk - if !absoluteURL && curDevfileCtx.GetAbsPath() != "" { - newUri = path.Join(path.Dir(curDevfileCtx.GetAbsPath()), uri) - d.Ctx = devfileCtx.NewDevfileCtx(newUri) - if util.ValidateFile(newUri) != nil { - return DevfileObj{}, fmt.Errorf("the provided path is not a valid filepath %s", newUri) - } - srcDir := path.Dir(newUri) - destDir := path.Dir(curDevfileCtx.GetAbsPath()) - if srcDir != destDir { - err := util.CopyAllDirFiles(srcDir, destDir) - if err != nil { - return DevfileObj{}, err - } - } - } else { - if absoluteURL { - // absolute URL address - newUri = uri - } else if curDevfileCtx.GetURL() != "" { - // relative path to a URL - u, err := url.Parse(curDevfileCtx.GetURL()) - if err != nil { - return DevfileObj{}, err - } - u.Path = path.Join(u.Path, uri) - newUri = u.String() - } else { - return DevfileObj{}, fmt.Errorf("failed to resolve parent uri, devfile context is missing absolute url and path to devfile. %s", resolveImportReference(importReference)) - } - - d.Ctx = devfileCtx.NewURLDevfileCtx(newUri) - if strings.Contains(newUri, "raw.githubusercontent.com") { - urlComponents, err := util.GetGitUrlComponentsFromRaw(newUri) - if err != nil { - return DevfileObj{}, err - } - destDir := path.Dir(curDevfileCtx.GetAbsPath()) - err = getResourcesFromGit(urlComponents, destDir) - if err != nil { - return DevfileObj{}, err - } - } - } - importReference.Uri = newUri - newResolveCtx := resolveCtx.appendNode(importReference) - - return populateAndParseDevfile(d, newResolveCtx, tool, true) -} - -func getResourcesFromGit(gitUrlComponents map[string]string, destDir string) error { - stackDir, err := ioutil.TempDir(os.TempDir(), fmt.Sprintf("git-resources")) - if err != nil { - return fmt.Errorf("failed to create dir: %s, error: %v", stackDir, err) - } - defer os.RemoveAll(stackDir) - - err = util.CloneGitRepo(gitUrlComponents, stackDir) - if err != nil { - return err - } - - dir := path.Dir(path.Join(stackDir, gitUrlComponents["file"])) - err = util.CopyAllDirFiles(dir, destDir) - if err != nil { - return err - } - - return nil -} - -func parseFromKubeCRD(importReference v1.ImportReference, resolveCtx *resolutionContextTree, tool resolverTools) (d DevfileObj, err error) { - - if tool.k8sClient == nil || tool.context == nil { - return DevfileObj{}, fmt.Errorf("Kubernetes client and context are required to parse from Kubernetes CRD") - } - namespace := importReference.Kubernetes.Namespace - - if namespace == "" { - // if namespace is not set in devfile, use default namespace provided in by consumer - if tool.defaultNamespace != "" { - namespace = tool.defaultNamespace - } else { - // use current namespace if namespace is not set in devfile and not provided by consumer - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} - config := 
clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) - namespace, _, err = config.Namespace() - if err != nil { - return DevfileObj{}, fmt.Errorf("kubernetes namespace is not provided, and cannot get current running cluster's namespace: %v", err) - } - } - } - - var dwTemplate v1.DevWorkspaceTemplate - namespacedName := types.NamespacedName{ - Name: importReference.Kubernetes.Name, - Namespace: namespace, - } - err = tool.k8sClient.Get(tool.context, namespacedName, &dwTemplate) - if err != nil { - return DevfileObj{}, err - } - - d, err = convertDevWorskapceTemplateToDevObj(dwTemplate) - if err != nil { - return DevfileObj{}, err - } - - importReference.Kubernetes.Namespace = namespace - newResolveCtx := resolveCtx.appendNode(importReference) - - err = parseParentAndPlugin(d, newResolveCtx, tool) - return d, err - -} - -func convertDevWorskapceTemplateToDevObj(dwTemplate v1.DevWorkspaceTemplate) (d DevfileObj, err error) { - // APIVersion: group/version - // for example: APIVersion: "workspace.devfile.io/v1alpha2" uses api version v1alpha2, and match to v2 schemas - tempList := strings.Split(dwTemplate.APIVersion, "/") - apiversion := tempList[len(tempList)-1] - d.Data, err = data.NewDevfileData(apiversion) - if err != nil { - return DevfileObj{}, err - } - d.Data.SetDevfileWorkspaceSpec(dwTemplate.Spec) - - return d, nil - -} - -// setDefaults sets the default values for nil boolean properties after the merging of devWorkspaceTemplateSpec is complete -func setDefaults(d DevfileObj) (err error) { - - var devfileVersion string - if devfileVersion = d.Ctx.GetApiVersion(); devfileVersion == "" { - devfileVersion = d.Data.GetSchemaVersion() - } - - commands, err := d.Data.GetCommands(common.DevfileOptions{}) - - if err != nil { - return err - } - - //set defaults on the commands - var cmdGroup *v1.CommandGroup - for i := range commands { - command := commands[i] - cmdGroup = nil - - if command.Exec != nil { - exec := command.Exec - val := exec.GetHotReloadCapable() - exec.HotReloadCapable = &val - cmdGroup = exec.Group - - } else if command.Composite != nil { - composite := command.Composite - val := composite.GetParallel() - composite.Parallel = &val - cmdGroup = composite.Group - - } else if command.Apply != nil { - cmdGroup = command.Apply.Group - } - - if cmdGroup != nil { - setIsDefault(cmdGroup) - } - - } - - //set defaults on the components - - components, err := d.Data.GetComponents(common.DevfileOptions{}) - - if err != nil { - return err - } - - var endpoints []v1.Endpoint - for i := range components { - component := components[i] - endpoints = nil - - if component.Container != nil { - container := component.Container - val := container.GetDedicatedPod() - container.DedicatedPod = &val - - msVal := container.GetMountSources() - container.MountSources = &msVal - - endpoints = container.Endpoints - - } else if component.Kubernetes != nil { - endpoints = component.Kubernetes.Endpoints - if devfileVersion != string(data.APISchemaVersion200) && devfileVersion != string(data.APISchemaVersion210) { - val := component.Kubernetes.GetDeployByDefault() - component.Kubernetes.DeployByDefault = &val - } - } else if component.Openshift != nil { - endpoints = component.Openshift.Endpoints - if devfileVersion != string(data.APISchemaVersion200) && devfileVersion != string(data.APISchemaVersion210) { - val := component.Openshift.GetDeployByDefault() - component.Openshift.DeployByDefault = &val - } - - } else if component.Volume != nil && devfileVersion != 
string(data.APISchemaVersion200) { - volume := component.Volume - val := volume.GetEphemeral() - volume.Ephemeral = &val - - } else if component.Image != nil { //we don't need to do a schema version check since Image in v2.2.0. If used in older specs, a parser error would occur - dockerImage := component.Image.Dockerfile - if dockerImage != nil { - val := dockerImage.GetRootRequired() - dockerImage.RootRequired = &val - } - val := component.Image.GetAutoBuild() - component.Image.AutoBuild = &val - } - - if endpoints != nil { - setEndpoints(endpoints) - } - } - - return nil -} - -// /setIsDefault sets the default value of CommandGroup.IsDefault if nil -func setIsDefault(cmdGroup *v1.CommandGroup) { - val := cmdGroup.GetIsDefault() - cmdGroup.IsDefault = &val -} - -// setEndpoints sets the default value of Endpoint.Secure if nil -func setEndpoints(endpoints []v1.Endpoint) { - for i := range endpoints { - val := endpoints[i].GetSecure() - endpoints[i].Secure = &val - } -} - -// parseKubeResourceFromURI iterate through all kubernetes & openshift components, and parse from uri and update the content to inlined field in devfileObj -func parseKubeResourceFromURI(devObj DevfileObj) error { - getKubeCompOptions := common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1.KubernetesComponentType, - }, - } - getOpenshiftCompOptions := common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1.OpenshiftComponentType, - }, - } - kubeComponents, err := devObj.Data.GetComponents(getKubeCompOptions) - if err != nil { - return err - } - openshiftComponents, err := devObj.Data.GetComponents(getOpenshiftCompOptions) - if err != nil { - return err - } - for _, kubeComp := range kubeComponents { - if kubeComp.Kubernetes != nil && kubeComp.Kubernetes.Uri != "" { - /* #nosec G601 -- not an issue, kubeComp is de-referenced in sequence*/ - err := convertK8sLikeCompUriToInlined(&kubeComp, devObj.Ctx) - if err != nil { - return errors.Wrapf(err, "failed to convert Kubernetes Uri to inlined for component '%s'", kubeComp.Name) - } - err = devObj.Data.UpdateComponent(kubeComp) - if err != nil { - return err - } - } - } - for _, openshiftComp := range openshiftComponents { - if openshiftComp.Openshift != nil && openshiftComp.Openshift.Uri != "" { - /* #nosec G601 -- not an issue, openshiftComp is de-referenced in sequence*/ - err := convertK8sLikeCompUriToInlined(&openshiftComp, devObj.Ctx) - if err != nil { - return errors.Wrapf(err, "failed to convert Openshift Uri to inlined for component '%s'", openshiftComp.Name) - } - err = devObj.Data.UpdateComponent(openshiftComp) - if err != nil { - return err - } - } - } - return nil -} - -// convertK8sLikeCompUriToInlined read in kubernetes resources definition from uri and converts to kubernetest inlined field -func convertK8sLikeCompUriToInlined(component *v1.Component, d devfileCtx.DevfileCtx) error { - var uri string - if component.Kubernetes != nil { - uri = component.Kubernetes.Uri - } else if component.Openshift != nil { - uri = component.Openshift.Uri - } - data, err := getKubernetesDefinitionFromUri(uri, d) - if err != nil { - return err - } - if component.Kubernetes != nil { - component.Kubernetes.Inlined = string(data) - component.Kubernetes.Uri = "" - } else if component.Openshift != nil { - component.Openshift.Inlined = string(data) - component.Openshift.Uri = "" - } - if component.Attributes == nil { - component.Attributes = attributes.Attributes{} - } - 
component.Attributes.PutString(K8sLikeComponentOriginalURIKey, uri) - - return nil -} - -// getKubernetesDefinitionFromUri read in kubernetes resources definition from uri and returns the raw content -func getKubernetesDefinitionFromUri(uri string, d devfileCtx.DevfileCtx) ([]byte, error) { - // validate URI - err := validation.ValidateURI(uri) - if err != nil { - return nil, err - } - - absoluteURL := strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") - var newUri string - var data []byte - // relative path on disk - if !absoluteURL && d.GetAbsPath() != "" { - newUri = path.Join(path.Dir(d.GetAbsPath()), uri) - fs := d.GetFs() - data, err = fs.ReadFile(newUri) - if err != nil { - return nil, errors.Wrapf(err, "failed to read kubernetes resources definition from path '%s'", newUri) - } - } else if absoluteURL || d.GetURL() != "" { - if d.GetURL() != "" { - // relative path to a URL - u, err := url.Parse(d.GetURL()) - if err != nil { - return nil, err - } - u.Path = path.Join(path.Dir(u.Path), uri) - newUri = u.String() - } else { - // absolute URL address - newUri = uri - } - params := util.HTTPRequestParams{URL: newUri} - data, err = util.DownloadInMemory(params) - if err != nil { - return nil, errors.Wrapf(err, "error getting kubernetes resources definition info from url '%s'", newUri) - } - } - return data, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/reader.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/reader.go deleted file mode 100644 index 89b3a53d0e1..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/reader.go +++ /dev/null @@ -1,157 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "bytes" - "fmt" - "io" - - "github.com/devfile/library/v2/pkg/util" - "github.com/pkg/errors" - "github.com/spf13/afero" - "gopkg.in/yaml.v3" - k8yaml "sigs.k8s.io/yaml" - - routev1 "github.com/openshift/api/route/v1" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - extensionsv1 "k8s.io/api/extensions/v1beta1" -) - -// YamlSrc specifies the src of the yaml in either Path, URL or Data format -type YamlSrc struct { - // Path to the yaml file - Path string - // URL of the yaml file - URL string - // Data is the yaml content in []byte format - Data []byte -} - -// KubernetesResources struct contains the Deployments, Services, -// Routes and Ingresses resources -type KubernetesResources struct { - Deployments []appsv1.Deployment - Services []corev1.Service - Routes []routev1.Route - Ingresses []extensionsv1.Ingress - Others []interface{} -} - -// ReadKubernetesYaml reads a yaml Kubernetes file from either the Path, URL or Data provided. -// It returns all the parsed Kubernetes objects as an array of interface. -// Consumers interested in the Kubernetes resources are expected to Unmarshal -// it to the struct of the respective Kubernetes resource. 
If a Path is being passed, -// provide a filesystem, otherwise nil can be passed in -func ReadKubernetesYaml(src YamlSrc, fs *afero.Afero) ([]interface{}, error) { - - var data []byte - var err error - - if src.URL != "" { - params := util.HTTPRequestParams{URL: src.URL} - data, err = util.DownloadInMemory(params) - if err != nil { - return nil, errors.Wrapf(err, "failed to download file %q", src.URL) - } - } else if src.Path != "" { - if fs == nil { - return nil, fmt.Errorf("cannot read from %s because fs passed in was nil", src.Path) - } - data, err = fs.ReadFile(src.Path) - if err != nil { - return nil, errors.Wrapf(err, "failed to read yaml from path %q", src.Path) - } - } else if len(src.Data) > 0 { - data = src.Data - } - - var values []interface{} - dec := yaml.NewDecoder(bytes.NewReader(data)) - for { - var value interface{} - err = dec.Decode(&value) - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - values = append(values, value) - } - - return values, nil -} - -// ParseKubernetesYaml Unmarshals the interface array of the Kubernetes resources -// and returns it as a KubernetesResources struct. Only Deployment, Service, Route -// and Ingress are processed. Consumers interested in other Kubernetes resources -// are expected to parse the values interface array an Unmarshal it to their -// desired Kuberenetes struct -func ParseKubernetesYaml(values []interface{}) (KubernetesResources, error) { - var deployments []appsv1.Deployment - var services []corev1.Service - var routes []routev1.Route - var ingresses []extensionsv1.Ingress - var otherResources []interface{} - - for _, value := range values { - var deployment appsv1.Deployment - var service corev1.Service - var route routev1.Route - var ingress extensionsv1.Ingress - var otherResource interface{} - - byteData, err := k8yaml.Marshal(value) - if err != nil { - return KubernetesResources{}, err - } - - kubernetesMap := value.(map[string]interface{}) - kind := kubernetesMap["kind"] - - switch kind { - case "Deployment": - err = k8yaml.Unmarshal(byteData, &deployment) - deployments = append(deployments, deployment) - case "Service": - err = k8yaml.Unmarshal(byteData, &service) - services = append(services, service) - case "Route": - err = k8yaml.Unmarshal(byteData, &route) - routes = append(routes, route) - case "Ingress": - err = k8yaml.Unmarshal(byteData, &ingress) - ingresses = append(ingresses, ingress) - default: - err = k8yaml.Unmarshal(byteData, &otherResource) - otherResources = append(otherResources, otherResource) - } - - if err != nil { - return KubernetesResources{}, err - } - } - - return KubernetesResources{ - Deployments: deployments, - Services: services, - Routes: routes, - Ingresses: ingresses, - Others: otherResources, - }, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry.go deleted file mode 100644 index 85b7d49bbe4..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry.go +++ /dev/null @@ -1,101 +0,0 @@ -//go:build !js - -package parser - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - devfileCtx "github.com/devfile/library/v2/pkg/devfile/parser/context" - "github.com/devfile/library/v2/pkg/util" - registryLibrary "github.com/devfile/registry-support/registry-library/library" - - "github.com/pkg/errors" -) - -func parseFromRegistry(importReference 
v1.ImportReference, resolveCtx *resolutionContextTree, tool resolverTools) (d DevfileObj, err error) { - id := importReference.Id - registryURL := importReference.RegistryUrl - destDir := path.Dir(d.Ctx.GetAbsPath()) - - if registryURL != "" { - devfileContent, err := getDevfileFromRegistry(id, registryURL, importReference.Version, tool.httpTimeout) - if err != nil { - return DevfileObj{}, err - } - d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(devfileContent) - if err != nil { - return d, errors.Wrap(err, "failed to set devfile content from bytes") - } - newResolveCtx := resolveCtx.appendNode(importReference) - - err = getResourcesFromRegistry(id, registryURL, destDir) - if err != nil { - return DevfileObj{}, err - } - - return populateAndParseDevfile(d, newResolveCtx, tool, true) - - } else if tool.registryURLs != nil { - for _, registryURL := range tool.registryURLs { - devfileContent, err := getDevfileFromRegistry(id, registryURL, importReference.Version, tool.httpTimeout) - if devfileContent != nil && err == nil { - d.Ctx, err = devfileCtx.NewByteContentDevfileCtx(devfileContent) - if err != nil { - return d, errors.Wrap(err, "failed to set devfile content from bytes") - } - importReference.RegistryUrl = registryURL - newResolveCtx := resolveCtx.appendNode(importReference) - - err := getResourcesFromRegistry(id, registryURL, destDir) - if err != nil { - return DevfileObj{}, err - } - - return populateAndParseDevfile(d, newResolveCtx, tool, true) - } - } - } else { - return DevfileObj{}, fmt.Errorf("failed to fetch from registry, registry URL is not provided") - } - - return DevfileObj{}, fmt.Errorf("failed to get id: %s from registry URLs provided", id) -} - -func getDevfileFromRegistry(id, registryURL, version string, httpTimeout *int) ([]byte, error) { - if !strings.HasPrefix(registryURL, "http://") && !strings.HasPrefix(registryURL, "https://") { - return nil, fmt.Errorf("the provided registryURL: %s is not a valid URL", registryURL) - } - param := util.HTTPRequestParams{ - URL: fmt.Sprintf("%s/devfiles/%s/%s", registryURL, id, version), - } - - param.Timeout = httpTimeout - //suppress telemetry for parent uri references - param.TelemetryClientName = util.TelemetryIndirectDevfileCall - return util.HTTPGetRequest(param, 0) -} - -func getResourcesFromRegistry(id, registryURL, destDir string) error { - stackDir, err := ioutil.TempDir(os.TempDir(), fmt.Sprintf("registry-resources-%s", id)) - if err != nil { - return fmt.Errorf("failed to create dir: %s, error: %v", stackDir, err) - } - defer os.RemoveAll(stackDir) - //suppress telemetry for downloading resources from parent reference - err = registryLibrary.PullStackFromRegistry(registryURL, id, stackDir, registryLibrary.RegistryOptions{Telemetry: registryLibrary.TelemetryData{Client: util.TelemetryIndirectDevfileCall}}) - if err != nil { - return fmt.Errorf("failed to pull stack from registry %s", registryURL) - } - - err = util.CopyAllDirFiles(stackDir, destDir) - if err != nil { - return err - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry_js.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry_js.go deleted file mode 100644 index f6998a00361..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/registry_js.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build js - -package parser - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" -) - -func parseFromRegistry(importReference v1.ImportReference, 
resolveCtx *resolutionContextTree, tool resolverTools) (d DevfileObj, err error) {
-    return DevfileObj{}, nil
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/resolutionContext.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/resolutionContext.go
deleted file mode 100644
index 799ecd64e24..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/resolutionContext.go
+++ /dev/null
@@ -1,79 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package parser
-
-import (
-    "fmt"
-    "reflect"
-
-    v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2"
-)
-
-// resolutionContextTree is a recursive structure representing information about the devfile that is
-// lost when flattening (e.g. plugins, parents)
-type resolutionContextTree struct {
-    importReference v1.ImportReference
-    parentNode      *resolutionContextTree
-}
-
-// appendNode adds a new node to the resolution context.
-func (t *resolutionContextTree) appendNode(importReference v1.ImportReference) *resolutionContextTree {
-    newNode := &resolutionContextTree{
-        importReference: importReference,
-        parentNode:      t,
-    }
-    return newNode
-}
-
-// hasCycle checks if the current resolutionContextTree has a cycle
-func (t *resolutionContextTree) hasCycle() error {
-    var seenRefs []v1.ImportReference
-    currNode := t
-    hasCycle := false
-    cycle := resolveImportReference(t.importReference)
-
-    for currNode.parentNode != nil {
-        for _, seenRef := range seenRefs {
-            if reflect.DeepEqual(seenRef, currNode.importReference) {
-                hasCycle = true
-            }
-        }
-        seenRefs = append(seenRefs, currNode.importReference)
-        currNode = currNode.parentNode
-        cycle = fmt.Sprintf("%s -> %s", resolveImportReference(currNode.importReference), cycle)
-    }
-
-    if hasCycle {
-        return fmt.Errorf("devfile has a cycle in references: %v", cycle)
-    }
-    return nil
-}
-
-func resolveImportReference(importReference v1.ImportReference) string {
-    if !reflect.DeepEqual(importReference, v1.ImportReference{}) {
-        switch {
-        case importReference.Uri != "":
-            return fmt.Sprintf("uri: %s", importReference.Uri)
-        case importReference.Id != "":
-            return fmt.Sprintf("id: %s, registryURL: %s", importReference.Id, importReference.RegistryUrl)
-        case importReference.Kubernetes != nil:
-            return fmt.Sprintf("name: %s, namespace: %s", importReference.Kubernetes.Name, importReference.Kubernetes.Namespace)
-        }
-    }
-    // the first node
-    return "main devfile"
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/sourceAttribute.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/sourceAttribute.go
deleted file mode 100644
index b8949092760..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/sourceAttribute.go
+++ /dev/null
@@ -1,130 +0,0 @@
-//
-// Copyright 2022 Red Hat, Inc.
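The resolutionContextTree above detects import cycles by walking the parent chain and comparing each visited import reference against those already seen. A simplified standalone sketch of the same idea, with a plain string standing in for v1.ImportReference:

```go
package main

import "fmt"

type node struct {
	ref    string // stand-in for v1.ImportReference
	parent *node
}

// append mirrors appendNode: each resolved import points back at its parent.
func (n *node) append(ref string) *node { return &node{ref: ref, parent: n} }

// hasCycle walks the parent chain and fails on a repeated reference.
func (n *node) hasCycle() error {
	seen := map[string]bool{}
	for cur := n; cur != nil; cur = cur.parent {
		if seen[cur.ref] {
			return fmt.Errorf("devfile has a cycle in references ending at %q", cur.ref)
		}
		seen[cur.ref] = true
	}
	return nil
}

func main() {
	root := &node{ref: "main devfile"}
	leaf := root.append("uri: parent.yaml").append("uri: grandparent.yaml")
	fmt.Println(leaf.hasCycle()) // <nil>: no reference repeats on this chain
}
```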
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/api/v2/pkg/attributes" - "github.com/devfile/api/v2/pkg/validation" -) - -const ( - importSourceAttribute = validation.ImportSourceAttribute - parentOverrideAttribute = validation.ParentOverrideAttribute - pluginOverrideAttribute = validation.PluginOverrideAttribute -) - -// addSourceAttributesForParentOverride adds an attribute 'api.devfile.io/imported-from=' -// to all elements of template spec content that support attributes. -func addSourceAttributesForTemplateSpecContent(sourceImportReference v1.ImportReference, template *v1.DevWorkspaceTemplateSpecContent) { - for idx, component := range template.Components { - if component.Attributes == nil { - template.Components[idx].Attributes = attributes.Attributes{} - } - template.Components[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) - } - for idx, command := range template.Commands { - if command.Attributes == nil { - template.Commands[idx].Attributes = attributes.Attributes{} - } - template.Commands[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) - } - for idx, project := range template.Projects { - if project.Attributes == nil { - template.Projects[idx].Attributes = attributes.Attributes{} - } - template.Projects[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) - } - for idx, project := range template.StarterProjects { - if project.Attributes == nil { - template.StarterProjects[idx].Attributes = attributes.Attributes{} - } - template.StarterProjects[idx].Attributes.PutString(importSourceAttribute, resolveImportReference(sourceImportReference)) - } -} - -// addSourceAttributesForParentOverride adds an attribute 'api.devfile.io/parent-override-from=' -// to all elements of parent override that support attributes. 
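The source attributes stamped by the functions above can be read back by a consumer after a flattened parse. A sketch, assuming devObj comes from parser.ParseDevfile and using the attribute key named in the comments ('api.devfile.io/imported-from'):

```go
package main

import (
	"fmt"

	"github.com/devfile/library/v2/pkg/devfile/parser"
	"github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common"
)

func reportImportSources(devObj parser.DevfileObj) error {
	components, err := devObj.Data.GetComponents(common.DevfileOptions{})
	if err != nil {
		return err
	}
	for _, c := range components {
		var attrErr error
		// GetString stores a KeyNotFoundError in attrErr when the attribute is absent.
		src := c.Attributes.GetString("api.devfile.io/imported-from", &attrErr)
		if attrErr == nil && src != "" {
			fmt.Printf("component %q imported from %s\n", c.Name, src)
		}
	}
	return nil
}
```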
-func addSourceAttributesForParentOverride(sourceImportReference v1.ImportReference, parentOverrides *v1.ParentOverrides) { - for idx, component := range parentOverrides.Components { - if component.Attributes == nil { - parentOverrides.Components[idx].Attributes = attributes.Attributes{} - } - parentOverrides.Components[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) - } - for idx, command := range parentOverrides.Commands { - if command.Attributes == nil { - parentOverrides.Commands[idx].Attributes = attributes.Attributes{} - } - parentOverrides.Commands[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) - } - for idx, project := range parentOverrides.Projects { - if project.Attributes == nil { - parentOverrides.Projects[idx].Attributes = attributes.Attributes{} - } - parentOverrides.Projects[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) - } - for idx, project := range parentOverrides.StarterProjects { - if project.Attributes == nil { - parentOverrides.StarterProjects[idx].Attributes = attributes.Attributes{} - } - parentOverrides.StarterProjects[idx].Attributes.PutString(parentOverrideAttribute, resolveImportReference(sourceImportReference)) - } - -} - -// addSourceAttributesForPluginOverride adds an attribute 'api.devfile.io/plugin-override-from=' -// to all elements of plugin override that support attributes. -func addSourceAttributesForPluginOverride(sourceImportReference v1.ImportReference, pluginOverrides *v1.PluginOverrides) { - for idx, component := range pluginOverrides.Components { - if component.Attributes == nil { - pluginOverrides.Components[idx].Attributes = attributes.Attributes{} - } - pluginOverrides.Components[idx].Attributes.PutString(pluginOverrideAttribute, resolveImportReference(sourceImportReference)) - } - for idx, command := range pluginOverrides.Commands { - if command.Attributes == nil { - pluginOverrides.Commands[idx].Attributes = attributes.Attributes{} - } - pluginOverrides.Commands[idx].Attributes.PutString(pluginOverrideAttribute, resolveImportReference(sourceImportReference)) - } - -} - -// addSourceAttributesForOverrideAndMerge adds an attribute record the import reference to all elements of template that support attributes. -func addSourceAttributesForOverrideAndMerge(sourceImportReference v1.ImportReference, template interface{}) error { - if template == nil { - return fmt.Errorf("cannot add source attributes to nil") - } - - mainContent, isMainContent := template.(*v1.DevWorkspaceTemplateSpecContent) - parentOverride, isParentOverride := template.(*v1.ParentOverrides) - pluginOverride, isPluginOverride := template.(*v1.PluginOverrides) - - switch { - case isMainContent: - addSourceAttributesForTemplateSpecContent(sourceImportReference, mainContent) - case isParentOverride: - addSourceAttributesForParentOverride(sourceImportReference, parentOverride) - case isPluginOverride: - addSourceAttributesForPluginOverride(sourceImportReference, pluginOverride) - default: - return fmt.Errorf("unknown template type") - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/utils.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/utils.go deleted file mode 100644 index d65ce6ff541..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/utils.go +++ /dev/null @@ -1,112 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. 
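The utils.go file whose deletion follows defines two exported helpers, GetDeployComponents and GetImageBuildComponent. A sketch of how a consumer might chain them; devObj is assumed to come from parser.ParseDevfile, and the Image.ImageName field access is an assumption from the devfile/api types, not shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/devfile/library/v2/pkg/devfile/parser"
)

func imageForDeploy(devObj parser.DevfileObj) error {
	// components referenced by the default deploy command
	deployComponents, err := parser.GetDeployComponents(devObj.Data)
	if err != nil {
		return err
	}
	// the single image component the build step should use;
	// errors out when zero, or more than one, image component matches
	imageComponent, err := parser.GetImageBuildComponent(devObj.Data, deployComponents)
	if err != nil {
		return err
	}
	fmt.Println("image to build:", imageComponent.Image.ImageName)
	return nil
}
```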
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - "fmt" - "reflect" - - devfilev1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - "github.com/devfile/library/v2/pkg/devfile/parser/data" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" -) - -// GetDeployComponents gets the default deploy command associated components -func GetDeployComponents(devfileData data.DevfileData) (map[string]string, error) { - deployCommandFilter := common.DevfileOptions{ - CommandOptions: common.CommandOptions{ - CommandGroupKind: devfilev1.DeployCommandGroupKind, - }, - } - deployCommands, err := devfileData.GetCommands(deployCommandFilter) - if err != nil { - return nil, err - } - - deployAssociatedComponents := make(map[string]string) - var deployAssociatedSubCommands []string - - for _, command := range deployCommands { - if command.Apply != nil { - if len(deployCommands) > 1 && command.Apply.Group.IsDefault != nil && !*command.Apply.Group.IsDefault { - continue - } - deployAssociatedComponents[command.Apply.Component] = command.Apply.Component - } else if command.Composite != nil { - if len(deployCommands) > 1 && command.Composite.Group.IsDefault != nil && !*command.Composite.Group.IsDefault { - continue - } - deployAssociatedSubCommands = append(deployAssociatedSubCommands, command.Composite.Commands...) - } - } - - applyCommandFilter := common.DevfileOptions{ - CommandOptions: common.CommandOptions{ - CommandType: devfilev1.ApplyCommandType, - }, - } - applyCommands, err := devfileData.GetCommands(applyCommandFilter) - if err != nil { - return nil, err - } - - for _, command := range applyCommands { - if command.Apply != nil { - for _, deployCommand := range deployAssociatedSubCommands { - if deployCommand == command.Id { - deployAssociatedComponents[command.Apply.Component] = command.Apply.Component - } - } - - } - } - - return deployAssociatedComponents, nil -} - -// GetImageBuildComponent gets the image build component from the deploy associated components -func GetImageBuildComponent(devfileData data.DevfileData, deployAssociatedComponents map[string]string) (devfilev1.Component, error) { - imageComponentFilter := common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: devfilev1.ImageComponentType, - }, - } - - imageComponents, err := devfileData.GetComponents(imageComponentFilter) - if err != nil { - return devfilev1.Component{}, err - } - - var imageBuildComponent devfilev1.Component - for _, component := range imageComponents { - if _, ok := deployAssociatedComponents[component.Name]; ok && component.Image != nil { - if reflect.DeepEqual(imageBuildComponent, devfilev1.Component{}) { - imageBuildComponent = component - } else { - errMsg := "expected to find one devfile image component with a deploy command for build. 
Currently there is more than one image component" - return devfilev1.Component{}, fmt.Errorf(errMsg) - } - } - } - - // If there is not one image component defined in the deploy command, err out - if reflect.DeepEqual(imageBuildComponent, devfilev1.Component{}) { - errMsg := "expected to find one devfile image component with a deploy command for build. Currently there is no image component" - return devfilev1.Component{}, fmt.Errorf(errMsg) - } - - return imageBuildComponent, nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/writer.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/writer.go deleted file mode 100644 index ed3f287030e..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/parser/writer.go +++ /dev/null @@ -1,115 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package parser - -import ( - v1 "github.com/devfile/api/v2/pkg/apis/workspaces/v1alpha2" - apiAttributes "github.com/devfile/api/v2/pkg/attributes" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - "sigs.k8s.io/yaml" - - "github.com/devfile/library/v2/pkg/testingutil/filesystem" - "github.com/pkg/errors" - "k8s.io/klog" -) - -// WriteYamlDevfile creates a devfile.yaml file -func (d *DevfileObj) WriteYamlDevfile() error { - - // Check kubernetes components, and restore original uri content - if d.Ctx.GetConvertUriToInlined() { - err := restoreK8sCompURI(d) - if err != nil { - return errors.Wrapf(err, "failed to restore kubernetes component uri field") - } - } - // Encode data into YAML format - yamlData, err := yaml.Marshal(d.Data) - if err != nil { - return errors.Wrapf(err, "failed to marshal devfile object into yaml") - } - // Write to devfile.yaml - fs := d.Ctx.GetFs() - if fs == nil { - fs = filesystem.DefaultFs{} - } - err = fs.WriteFile(d.Ctx.GetAbsPath(), yamlData, 0644) - if err != nil { - return errors.Wrapf(err, "failed to create devfile yaml file") - } - - // Successful - klog.V(2).Infof("devfile yaml created at: '%s'", OutputDevfileYamlPath) - return nil -} - -func restoreK8sCompURI(devObj *DevfileObj) error { - getKubeCompOptions := common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1.KubernetesComponentType, - }, - } - getOpenshiftCompOptions := common.DevfileOptions{ - ComponentOptions: common.ComponentOptions{ - ComponentType: v1.OpenshiftComponentType, - }, - } - kubeComponents, err := devObj.Data.GetComponents(getKubeCompOptions) - if err != nil { - return err - } - openshiftComponents, err := devObj.Data.GetComponents(getOpenshiftCompOptions) - if err != nil { - return err - } - - for _, kubeComp := range kubeComponents { - uri := kubeComp.Attributes.GetString(K8sLikeComponentOriginalURIKey, &err) - if err != nil { - if _, ok := err.(*apiAttributes.KeyNotFoundError); !ok { - return err - } - } - if uri != "" { - kubeComp.Kubernetes.Uri = uri - kubeComp.Kubernetes.Inlined = "" - delete(kubeComp.Attributes, 
K8sLikeComponentOriginalURIKey) - err = devObj.Data.UpdateComponent(kubeComp) - if err != nil { - return err - } - } - } - - for _, openshiftComp := range openshiftComponents { - uri := openshiftComp.Attributes.GetString(K8sLikeComponentOriginalURIKey, &err) - if err != nil { - if _, ok := err.(*apiAttributes.KeyNotFoundError); !ok { - return err - } - } - if uri != "" { - openshiftComp.Openshift.Uri = uri - openshiftComp.Openshift.Inlined = "" - delete(openshiftComp.Attributes, K8sLikeComponentOriginalURIKey) - err = devObj.Data.UpdateComponent(openshiftComp) - if err != nil { - return err - } - } - } - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/validate/validate.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/validate/validate.go deleted file mode 100644 index e40d89ab66a..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/devfile/validate/validate.go +++ /dev/null @@ -1,82 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package validate - -import ( - "fmt" - v2Validation "github.com/devfile/api/v2/pkg/validation" - devfileData "github.com/devfile/library/v2/pkg/devfile/parser/data" - v2 "github.com/devfile/library/v2/pkg/devfile/parser/data/v2" - "github.com/devfile/library/v2/pkg/devfile/parser/data/v2/common" - "github.com/hashicorp/go-multierror" -) - -// ValidateDevfileData validates whether sections of devfile are compatible -func ValidateDevfileData(data devfileData.DevfileData) error { - - commands, err := data.GetCommands(common.DevfileOptions{}) - if err != nil { - return err - } - components, err := data.GetComponents(common.DevfileOptions{}) - if err != nil { - return err - } - projects, err := data.GetProjects(common.DevfileOptions{}) - if err != nil { - return err - } - starterProjects, err := data.GetStarterProjects(common.DevfileOptions{}) - if err != nil { - return err - } - - var returnedErr error - switch d := data.(type) { - case *v2.DevfileV2: - // validate components - err = v2Validation.ValidateComponents(components) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - // validate commands - err = v2Validation.ValidateCommands(commands, components) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - err = v2Validation.ValidateEvents(data.GetEvents(), commands) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - err = v2Validation.ValidateProjects(projects) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - err = v2Validation.ValidateStarterProjects(starterProjects) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - - return returnedErr - - default: - return fmt.Errorf("unknown devfile type %T", d) - } -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/default_fs.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/default_fs.go deleted file mode 100644 
index 9eea63fc320..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/default_fs.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* - This package is a FORK of https://github.com/kubernetes/kubernetes/blob/master/pkg/util/filesystem/defaultfs.go - See above license -*/ - -package filesystem - -import ( - "io/ioutil" - "os" - "path/filepath" - "time" -) - -// DefaultFs implements Filesystem using same-named functions from "os" and "io/ioutil" -type DefaultFs struct{} - -var _ Filesystem = DefaultFs{} - -// Stat via os.Stat -func (DefaultFs) Stat(name string) (os.FileInfo, error) { - return os.Stat(name) -} - -// Create via os.Create -func (DefaultFs) Create(name string) (File, error) { - file, err := os.Create(name) - if err != nil { - return nil, err - } - return &defaultFile{file}, nil -} - -// Open via os.Open -func (DefaultFs) Open(name string) (File, error) { - file, err := os.Open(name) - if err != nil { - return nil, err - } - - return &defaultFile{file}, nil -} - -// OpenFile via os.OpenFile -func (DefaultFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - file, err := os.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - - return &defaultFile{file}, nil -} - -// Rename via os.Rename -func (DefaultFs) Rename(oldpath, newpath string) error { - return os.Rename(oldpath, newpath) -} - -// MkdirAll via os.MkdirAll -func (DefaultFs) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -// Chtimes via os.Chtimes -func (DefaultFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return os.Chtimes(name, atime, mtime) -} - -// RemoveAll via os.RemoveAll -func (DefaultFs) RemoveAll(path string) error { - return os.RemoveAll(path) -} - -// Remove via os.RemoveAll -func (DefaultFs) Remove(name string) error { - return os.Remove(name) -} - -// Getwd via os.Getwd -func (DefaultFs) Getwd() (dir string, err error) { - return os.Getwd() -} - -// ReadFile via ioutil.ReadFile -func (DefaultFs) ReadFile(filename string) ([]byte, error) { - return ioutil.ReadFile(filename) -} - -// WriteFile via ioutil.WriteFile -func (DefaultFs) WriteFile(filename string, data []byte, perm os.FileMode) error { - return ioutil.WriteFile(filename, data, perm) -} - -// TempDir via ioutil.TempDir -func (DefaultFs) TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} - -// TempFile via ioutil.TempFile -func (DefaultFs) TempFile(dir, prefix string) (File, error) { - file, err := ioutil.TempFile(dir, prefix) - if err != nil { - return nil, err - } - return &defaultFile{file}, nil -} - -// ReadDir via ioutil.ReadDir -func (DefaultFs) ReadDir(dirname string) ([]os.FileInfo, error) { - return ioutil.ReadDir(dirname) -} - -// Walk via filepath.Walk -func (DefaultFs) Walk(root string, walkFn filepath.WalkFunc) error { - return filepath.Walk(root, walkFn) -} - -// Chmod via os.Chmod -func (f DefaultFs) Chmod(name 
string, mode os.FileMode) error {
-    return os.Chmod(name, mode)
-}
-
-// defaultFile implements File using same-named functions from "os"
-type defaultFile struct {
-    file *os.File
-}
-
-// Name via os.File.Name
-func (file *defaultFile) Name() string {
-    return file.file.Name()
-}
-
-// Write via os.File.Write
-func (file *defaultFile) Write(b []byte) (n int, err error) {
-    return file.file.Write(b)
-}
-
-// WriteString via File.WriteString
-func (file *defaultFile) WriteString(s string) (int, error) {
-    return file.file.WriteString(s)
-}
-
-// Sync via os.File.Sync
-func (file *defaultFile) Sync() error {
-    return file.file.Sync()
-}
-
-// Close via os.File.Close
-func (file *defaultFile) Close() error {
-    return file.file.Close()
-}
-
-func (file *defaultFile) Readdir(n int) ([]os.FileInfo, error) {
-    return file.file.Readdir(n)
-}
-
-func (file *defaultFile) Read(b []byte) (n int, err error) {
-    return file.file.Read(b)
-}
-
-func (file *defaultFile) Chmod(name string, mode os.FileMode) error {
-    return file.file.Chmod(mode)
-}
diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/fake_fs.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/fake_fs.go
deleted file mode 100644
index fbf016dcdb3..00000000000
--- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/fake_fs.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
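The point of this forked Filesystem abstraction is dependency injection: production code accepts the interface, and tests substitute the in-memory fake defined in fake_fs.go just below. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/devfile/library/v2/pkg/testingutil/filesystem"
)

// writeMarker takes the interface, so callers decide whether it hits real disk.
func writeMarker(fs filesystem.Filesystem, path string) error {
	return fs.WriteFile(path, []byte("ok"), 0644)
}

func main() {
	fs := filesystem.NewFakeFs() // in-memory; use filesystem.DefaultFs{} for the real OS
	if err := writeMarker(fs, "/tmp/marker"); err != nil {
		panic(err)
	}
	data, _ := fs.ReadFile("/tmp/marker")
	fmt.Println(string(data)) // "ok"
}
```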
-*/ - -/* - This package is a FORK of https://github.com/kubernetes/kubernetes/blob/master/pkg/util/filesystem/fakefs.go - See above license -*/ - -package filesystem - -import ( - "os" - "path/filepath" - "time" - - "github.com/spf13/afero" -) - -// fakeFs is implemented in terms of afero -type fakeFs struct { - a afero.Afero -} - -// NewFakeFs returns a fake Filesystem that exists in-memory, useful for unit tests -func NewFakeFs() Filesystem { - return &fakeFs{a: afero.Afero{Fs: afero.NewMemMapFs()}} -} - -// Stat via afero.Fs.Stat -func (fs *fakeFs) Stat(name string) (os.FileInfo, error) { - return fs.a.Fs.Stat(name) -} - -// Create via afero.Fs.Create -func (fs *fakeFs) Create(name string) (File, error) { - file, err := fs.a.Fs.Create(name) - if err != nil { - return nil, err - } - return &fakeFile{file}, nil -} - -// Open via afero.Fs.Open -func (fs *fakeFs) Open(name string) (File, error) { - file, err := fs.a.Fs.Open(name) - if err != nil { - return nil, err - } - return &fakeFile{file}, nil -} - -// OpenFile via afero.Fs.OpenFile -func (fs *fakeFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - file, err := fs.a.Fs.OpenFile(name, flag, perm) - if err != nil { - return nil, err - } - return &fakeFile{file}, nil -} - -// Rename via afero.Fs.Rename -func (fs *fakeFs) Rename(oldpath, newpath string) error { - return fs.a.Fs.Rename(oldpath, newpath) -} - -// MkdirAll via afero.Fs.MkdirAll -func (fs *fakeFs) MkdirAll(path string, perm os.FileMode) error { - return fs.a.Fs.MkdirAll(path, perm) -} - -// Chtimes via afero.Fs.Chtimes -func (fs *fakeFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return fs.a.Fs.Chtimes(name, atime, mtime) -} - -// ReadFile via afero.ReadFile -func (fs *fakeFs) ReadFile(filename string) ([]byte, error) { - return fs.a.ReadFile(filename) -} - -// WriteFile via afero.WriteFile -func (fs *fakeFs) WriteFile(filename string, data []byte, perm os.FileMode) error { - return fs.a.WriteFile(filename, data, perm) -} - -// TempDir via afero.TempDir -func (fs *fakeFs) TempDir(dir, prefix string) (string, error) { - return fs.a.TempDir(dir, prefix) -} - -// TempFile via afero.TempFile -func (fs *fakeFs) TempFile(dir, prefix string) (File, error) { - file, err := fs.a.TempFile(dir, prefix) - if err != nil { - return nil, err - } - return &fakeFile{file}, nil -} - -// ReadDir via afero.ReadDir -func (fs *fakeFs) ReadDir(dirname string) ([]os.FileInfo, error) { - return fs.a.ReadDir(dirname) -} - -// Walk via afero.Walk -func (fs *fakeFs) Walk(root string, walkFn filepath.WalkFunc) error { - return fs.a.Walk(root, walkFn) -} - -// RemoveAll via afero.RemoveAll -func (fs *fakeFs) RemoveAll(path string) error { - return fs.a.RemoveAll(path) -} - -func (fs *fakeFs) Getwd() (dir string, err error) { - return ".", nil -} - -// Remove via afero.RemoveAll -func (fs *fakeFs) Remove(name string) error { - return fs.a.Remove(name) -} - -// Chmod via afero.Chmod -func (fs *fakeFs) Chmod(name string, mode os.FileMode) error { - return fs.a.Chmod(name, mode) -} - -// fakeFile implements File; for use with fakeFs -type fakeFile struct { - file afero.File -} - -// Name via afero.File.Name -func (file *fakeFile) Name() string { - return file.file.Name() -} - -// Write via afero.File.Write -func (file *fakeFile) Write(b []byte) (n int, err error) { - return file.file.Write(b) -} - -// WriteString via afero.File.WriteString -func (file *fakeFile) WriteString(s string) (n int, err error) { - return file.file.WriteString(s) -} - -// Sync via 
afero.File.Sync -func (file *fakeFile) Sync() error { - return file.file.Sync() -} - -// Close via afero.File.Close -func (file *fakeFile) Close() error { - return file.file.Close() -} - -func (file *fakeFile) Readdir(n int) ([]os.FileInfo, error) { - return file.file.Readdir(n) -} - -func (file *fakeFile) Read(b []byte) (n int, err error) { - return file.file.Read(b) -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/filesystem.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/filesystem.go deleted file mode 100644 index e71b6ffc8d2..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/filesystem.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* - This package is a FORK of https://github.com/kubernetes/kubernetes/blob/master/pkg/util/filesystem/filesystem.go - See above license -*/ - -package filesystem - -import ( - "os" - "path/filepath" - "time" -) - -// Filesystem is an interface that we can use to mock various filesystem operations -type Filesystem interface { - // from "os" - Stat(name string) (os.FileInfo, error) - Create(name string) (File, error) - Open(name string) (File, error) - OpenFile(name string, flag int, perm os.FileMode) (File, error) - Rename(oldpath, newpath string) error - MkdirAll(path string, perm os.FileMode) error - Chtimes(name string, atime time.Time, mtime time.Time) error - RemoveAll(path string) error - Remove(name string) error - Chmod(name string, mode os.FileMode) error - Getwd() (dir string, err error) - - // from "io/ioutil" - ReadFile(filename string) ([]byte, error) - WriteFile(filename string, data []byte, perm os.FileMode) error - TempDir(dir, prefix string) (string, error) - TempFile(dir, prefix string) (File, error) - ReadDir(dirname string) ([]os.FileInfo, error) - Walk(root string, walkFn filepath.WalkFunc) error -} - -// File is an interface that we can use to mock various filesystem operations typically -// accessed through the File object from the "os" package -type File interface { - // for now, the only os.File methods used are those below, add more as necessary - Name() string - Write(b []byte) (n int, err error) - WriteString(s string) (n int, err error) - Sync() error - Close() error - Read(b []byte) (n int, err error) - Readdir(n int) ([]os.FileInfo, error) -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/singleton.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/singleton.go deleted file mode 100644 index 1d16ade79d0..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/singleton.go +++ /dev/null @@ -1,25 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package filesystem - -var singleFs Filesystem - -func Get() Filesystem { - if singleFs == nil { - singleFs = &DefaultFs{} - } - return singleFs -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/watcher.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/watcher.go deleted file mode 100644 index 5a693e70a41..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/testingutil/filesystem/watcher.go +++ /dev/null @@ -1,90 +0,0 @@ -//go:build !js - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* - This package is a FORK of https://github.com/kubernetes/kubernetes/blob/master/pkg/util/filesystem/watcher.go - See above license -*/ - -package filesystem - -import ( - "github.com/fsnotify/fsnotify" -) - -// FSWatcher is a callback-based filesystem watcher abstraction for fsnotify. -type FSWatcher interface { - // Initializes the watcher with the given watch handlers. - // Called before all other methods. - Init(FSEventHandler, FSErrorHandler) error - - // Starts listening for events and errors. - // When an event or error occurs, the corresponding handler is called. - Run() - - // Add a filesystem path to watch - AddWatch(path string) error -} - -// FSEventHandler is called when a fsnotify event occurs. -type FSEventHandler func(event fsnotify.Event) - -// FSErrorHandler is called when a fsnotify error occurs. 
-type FSErrorHandler func(err error) - -type fsnotifyWatcher struct { - watcher *fsnotify.Watcher - eventHandler FSEventHandler - errorHandler FSErrorHandler -} - -var _ FSWatcher = &fsnotifyWatcher{} - -func (w *fsnotifyWatcher) AddWatch(path string) error { - return w.watcher.Add(path) -} - -func (w *fsnotifyWatcher) Init(eventHandler FSEventHandler, errorHandler FSErrorHandler) error { - var err error - w.watcher, err = fsnotify.NewWatcher() - if err != nil { - return err - } - - w.eventHandler = eventHandler - w.errorHandler = errorHandler - return nil -} - -func (w *fsnotifyWatcher) Run() { - go func() { - defer w.watcher.Close() - for { - select { - case event := <-w.watcher.Events: - if w.eventHandler != nil { - w.eventHandler(event) - } - case err := <-w.watcher.Errors: - if w.errorHandler != nil { - w.errorHandler(err) - } - } - } - }() -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/httpcache.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/httpcache.go deleted file mode 100644 index 8efa0a94105..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/httpcache.go +++ /dev/null @@ -1,44 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "os" - "path/filepath" - "time" - - "k8s.io/klog" -) - -// cleanHttpCache checks cacheDir and deletes all files that were modified more than cacheTime back -func cleanHttpCache(cacheDir string, cacheTime time.Duration) error { - cacheFiles, err := ioutil.ReadDir(cacheDir) - if err != nil { - return err - } - - for _, f := range cacheFiles { - if f.ModTime().Add(cacheTime).Before(time.Now()) { - klog.V(4).Infof("Removing cache file %s, because it is older than %s", f.Name(), cacheTime.String()) - err := os.Remove(filepath.Join(cacheDir, f.Name())) - if err != nil { - return err - } - } - } - return nil -} diff --git a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/util.go b/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/util.go deleted file mode 100644 index bdcdb4d139e..00000000000 --- a/ui/wasm/vendor/github.com/devfile/library/v2/pkg/util/util.go +++ /dev/null @@ -1,1441 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package util
-
-import (
-	"archive/zip"
-	"bufio"
-	"bytes"
-	"crypto/rand"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"math/big"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"os/exec"
-	"os/signal"
-	"os/user"
-	"path"
-	"path/filepath"
-	"regexp"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/go-git/go-git/v5/plumbing"
-
-	"github.com/devfile/library/v2/pkg/testingutil/filesystem"
-	"github.com/fatih/color"
-	gitpkg "github.com/go-git/go-git/v5"
-	"github.com/gobwas/glob"
-	"github.com/gregjones/httpcache"
-	"github.com/gregjones/httpcache/diskcache"
-	"github.com/pkg/errors"
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	kvalidation "k8s.io/apimachinery/pkg/util/validation"
-	"k8s.io/client-go/util/homedir"
-	"k8s.io/klog"
-)
-
-const (
-	HTTPRequestResponseTimeout   = 30 * time.Second          // HTTPRequestResponseTimeout configures the timeout of all HTTP requests
-	ModeReadWriteFile            = 0600                      // default permission for a file
-	CredentialPrefix             = "odo-"                    // CredentialPrefix is the prefix of the credential used to access a secure registry
-	TelemetryClientName          = "devfile-library"         // TelemetryClientName is the name of the devfile library client
-	TelemetryIndirectDevfileCall = "devfile-library-indirect" // TelemetryIndirectDevfileCall is used to identify calls made to retrieve the parent or plugin devfile
-)
-
-// httpCacheDir determines the directory where odo will cache HTTP responses
-var httpCacheDir = filepath.Join(os.TempDir(), "odohttpcache")
-
-var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
-
-// 63 is the max length of a DeploymentConfig in OpenShift, and we also have to take into account
-// that each component also gets a volume that uses the component name suffixed with -s2idata
-const maxAllowedNamespacedStringLength = 63 - len("-s2idata") - 1
-
-// This value can be provided to set a separate directory for the user's homedir resolution
-// note: for mocking purposes ONLY
-var customHomeDir = os.Getenv("CUSTOM_HOMEDIR")
-
-const defaultGithubRef = "master"
-
-// ResourceRequirementInfo holds a resource quantity before transformation into its appropriate form in the container spec
-type ResourceRequirementInfo struct {
-	ResourceType corev1.ResourceName
-	MinQty       resource.Quantity
-	MaxQty       resource.Quantity
-}
-
-// HTTPRequestParams holds the parameters for forming an HTTP request
-type HTTPRequestParams struct {
-	URL                 string
-	Token               string
-	Timeout             *int
-	TelemetryClientName string // optional client name for telemetry
-}
-
-// DownloadParams holds the parameters for forming a file download request
-type DownloadParams struct {
-	Request  HTTPRequestParams
-	Filepath string
-}
-
-// ConvertLabelsToSelector converts the given labels to a selector
-func ConvertLabelsToSelector(labels map[string]string) string {
-	var selector string
-	isFirst := true
-	for k, v := range labels {
-		if isFirst {
-			isFirst = false
-			if v == "" {
-				selector = selector + fmt.Sprintf("%v", k)
-			} else {
-				selector = fmt.Sprintf("%v=%v", k, v)
-			}
-		} else {
-			if v == "" {
-				selector = selector + fmt.Sprintf(",%v", k)
-			} else {
-				selector = selector + fmt.Sprintf(",%v=%v", k, v)
-			}
-		}
-	}
-	return selector
-}
-
-// GenerateRandomString generates a random string of lowercase characters of
-// the given size
-func GenerateRandomString(n int) string {
-	b := make([]rune, n)
-
-	for i := range b {
-		// this error is ignored because it fails only when the 2nd arg of Int() is less than 0,
-		// which won't happen
-		n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letterRunes))))
-		b[i] =
letterRunes[n.Int64()] - } - return string(b) -} - -// In checks if the value is in the array -func In(arr []string, value string) bool { - for _, item := range arr { - if item == value { - return true - } - } - return false -} - -// NamespaceOpenShiftObject hyphenates applicationName and componentName -func NamespaceOpenShiftObject(componentName string, applicationName string) (string, error) { - - // Error if it's blank - if componentName == "" { - return "", errors.New("namespacing: component name cannot be blank") - } - - // Error if it's blank - if applicationName == "" { - return "", errors.New("namespacing: application name cannot be blank") - } - - // Return the hyphenated namespaced name - originalName := fmt.Sprintf("%s-%s", strings.Replace(componentName, "/", "-", -1), applicationName) - truncatedName := TruncateString(originalName, maxAllowedNamespacedStringLength) - if originalName != truncatedName { - klog.V(4).Infof("The combination of application %s and component %s was too long so the final name was truncated to %s", - applicationName, componentName, truncatedName) - } - return truncatedName, nil -} - -// ExtractComponentType returns only component type part from passed component type(default unqualified, fully qualified, versioned, etc...and their combinations) for use as component name -// Possible types of parameters: -// 1. "myproject/python:3.5" -- Return python -// 2. "python:3.5" -- Return python -// 3. nodejs -- Return nodejs -func ExtractComponentType(namespacedVersionedComponentType string) string { - s := strings.Split(namespacedVersionedComponentType, "/") - versionedString := s[0] - if len(s) == 2 { - versionedString = s[1] - } - s = strings.Split(versionedString, ":") - return s[0] -} - -// ParseComponentImageName returns -// 1. image name -// 2. component type i.e, builder image name -// 3. component name default value is component type else the user requested component name -// 4. 
component version, which is "latest" by default, else the version passed with the builder image name
-func ParseComponentImageName(imageName string) (string, string, string, string) {
-	// We don't have to check it anymore; the Args check made sure that args has at least one item
-	// and no more than two
-
-	// "Default" values
-	componentImageName := imageName
-	componentType := imageName
-	componentName := ExtractComponentType(componentType)
-	componentVersion := "latest"
-
-	// Check if componentType includes ":"; if so, we need to split it to extract the version
-	if strings.ContainsAny(componentImageName, ":") {
-		versionSplit := strings.Split(imageName, ":")
-		componentType = versionSplit[0]
-		componentName = ExtractComponentType(componentType)
-		componentVersion = versionSplit[1]
-	}
-	return componentImageName, componentType, componentName, componentVersion
-}
-
-// WIN represents the Windows OS
-const WIN = "windows"
-
-// ReadFilePath reads a file path from a URL, converting file:///C:/path/to/file to C:\path\to\file
-func ReadFilePath(u *url.URL, os string) string {
-	location := u.Path
-	if os == WIN {
-		location = strings.Replace(u.Path, "/", "\\", -1)
-		location = location[1:]
-	}
-	return location
-}
-
-// GenFileURL converts a file path on Windows to /C:/path/to/file so that it works in a URL
-func GenFileURL(location string, os ...string) string {
-	// param os is made variadic only for the purpose of UTs, but need not be passed mandatorily
-	currOS := runtime.GOOS
-	if len(os) > 0 {
-		currOS = os[0]
-	}
-	urlPath := location
-	if currOS == WIN {
-		urlPath = "/" + strings.Replace(location, "\\", "/", -1)
-	}
-	return "file://" + urlPath
-}
-
-// ConvertKeyValueStringToMap converts a string slice of parameters to a map[string]string
-// Each value of the slice is expected to be in the key=value format
-// Values that do not conform to this "spec" will be ignored
-func ConvertKeyValueStringToMap(params []string) map[string]string {
-	result := make(map[string]string, len(params))
-	for _, param := range params {
-		str := strings.Split(param, "=")
-		if len(str) != 2 {
-			klog.Fatalf("Parameter %s is not in the expected key=value format", param)
-		} else {
-			result[str[0]] = str[1]
-		}
-	}
-	return result
-}
-
-// TruncateString truncates the passed string to the given length
-// Note: if -1 is passed, the original string is returned
-func TruncateString(str string, maxLen int) string {
-	if maxLen == -1 {
-		return str
-	}
-	if len(str) > maxLen {
-		return str[:maxLen]
-	}
-	return str
-}
-
-// GetAbsPath returns the absolute path for the passed file path, resolving even ~ to the user home dir;
-// any other such symbols that are only shell-expanded can also be handled here
-func GetAbsPath(path string) (string, error) {
-	// Only the shell resolves `~` to home, so handle it specially
-	var dir string
-	if strings.HasPrefix(path, "~") {
-		if len(customHomeDir) > 0 {
-			dir = customHomeDir
-		} else {
-			usr, err := user.Current()
-			if err != nil {
-				return path, errors.Wrapf(err, "unable to resolve %s to absolute path", path)
-			}
-			dir = usr.HomeDir
-		}
-
-		if len(path) > 1 {
-			path = filepath.Join(dir, path[1:])
-		} else {
-			path = dir
-		}
-	}
-
-	path, err := filepath.Abs(path)
-	if err != nil {
-		return path, errors.Wrapf(err, "unable to resolve %s to absolute path", path)
-	}
-	return path, nil
-}
-
-// GetRandomName returns a randomly generated name which can be used for naming odo and/or OpenShift entities
-// prefix: Desired prefix part of the name
-// prefixMaxLen: Desired maximum length of prefix part of random name; if -1 is passed, no
limit on length will be enforced -// existList: List to verify that the returned name does not already exist -// retries: number of retries to try generating a unique name -// Returns: -// 1. randomname: is prefix-suffix, where: -// prefix: string passed as prefix or fetched current directory of length same as the passed prefixMaxLen -// suffix: 4 char random string -// 2. error: if requested number of retries also failed to generate unique name -func GetRandomName(prefix string, prefixMaxLen int, existList []string, retries int) (string, error) { - prefix = TruncateString(GetDNS1123Name(strings.ToLower(prefix)), prefixMaxLen) - name := fmt.Sprintf("%s-%s", prefix, GenerateRandomString(4)) - - //Create a map of existing names for efficient iteration to find if the newly generated name is same as any of the already existing ones - existingNames := make(map[string]bool) - for _, existingName := range existList { - existingNames[existingName] = true - } - - // check if generated name is already used in the existList - if _, ok := existingNames[name]; ok { - prevName := name - trial := 0 - // keep generating names until generated name is not unique. So, loop terminates when name is unique and hence for condition is false - for ok { - trial = trial + 1 - prevName = name - // Attempt unique name generation from prefix-suffix by concatenating prefix-suffix withrandom string of length 4 - prevName = fmt.Sprintf("%s-%s", prevName, GenerateRandomString(4)) - _, ok = existingNames[prevName] - if trial >= retries { - // Avoid infinite loops and fail after passed number of retries - return "", fmt.Errorf("failed to generate a unique name even after %d retrials", retries) - } - } - // If found to be unique, set name as generated name - name = prevName - } - // return name - return name, nil -} - -// GetDNS1123Name Converts passed string into DNS-1123 string -func GetDNS1123Name(str string) string { - nonAllowedCharsRegex := regexp.MustCompile(`[^a-zA-Z0-9_-]+`) - withReplacedChars := strings.Replace( - nonAllowedCharsRegex.ReplaceAllString(str, "-"), - "--", "-", -1) - return removeNonAlphaSuffix(removeNonAlphaPrefix(withReplacedChars)) -} - -func removeNonAlphaPrefix(input string) string { - regex := regexp.MustCompile("^[^a-zA-Z0-9]+(.*)$") - return regex.ReplaceAllString(input, "$1") -} - -func removeNonAlphaSuffix(input string) string { - suffixRegex := regexp.MustCompile("^(.*?)[^a-zA-Z0-9]+$") //regex that strips all trailing non alpha-numeric chars - matches := suffixRegex.FindStringSubmatch(input) - matchesLength := len(matches) - if matchesLength == 0 { - // in this case the string does not contain a non-alphanumeric suffix - return input - } else { - // in this case we return the smallest match which in the last element in the array - return matches[matchesLength-1] - } -} - -// SliceDifference returns the values of s2 that do not exist in s1 -func SliceDifference(s1 []string, s2 []string) []string { - mb := map[string]bool{} - for _, x := range s1 { - mb[x] = true - } - difference := []string{} - for _, x := range s2 { - if _, ok := mb[x]; !ok { - difference = append(difference, x) - } - } - return difference -} - -// OpenBrowser opens the URL within the users default browser -func OpenBrowser(url string) error { - var err error - - switch runtime.GOOS { - case "linux": - err = exec.Command("xdg-open", url).Start() - case "windows": - err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() - case "darwin": - err = exec.Command("open", url).Start() - default: - err = 
fmt.Errorf("unsupported platform") - } - if err != nil { - return err - } - - return nil -} - -// FetchResourceQuantity takes passed min, max and requested resource quantities and returns min and max resource requests -func FetchResourceQuantity(resourceType corev1.ResourceName, min string, max string, request string) (*ResourceRequirementInfo, error) { - if min == "" && max == "" && request == "" { - return nil, nil - } - // If minimum and maximum both are passed they carry highest priority - // Otherwise, use the request as min and max - var minResource resource.Quantity - var maxResource resource.Quantity - if min != "" { - resourceVal, err := resource.ParseQuantity(min) - if err != nil { - return nil, err - } - minResource = resourceVal - } - if max != "" { - resourceVal, err := resource.ParseQuantity(max) - if err != nil { - return nil, err - } - maxResource = resourceVal - } - if request != "" && (min == "" || max == "") { - resourceVal, err := resource.ParseQuantity(request) - if err != nil { - return nil, err - } - minResource = resourceVal - maxResource = resourceVal - } - return &ResourceRequirementInfo{ - ResourceType: resourceType, - MinQty: minResource, - MaxQty: maxResource, - }, nil -} - -// CheckPathExists checks if a path exists or not -func CheckPathExists(path string) bool { - return checkPathExistsOnFS(path, filesystem.DefaultFs{}) -} - -func checkPathExistsOnFS(path string, fs filesystem.Filesystem) bool { - if _, err := fs.Stat(path); !os.IsNotExist(err) { - // path to file does exist - return true - } - klog.V(4).Infof("path %s doesn't exist, skipping it", path) - return false -} - -// GetHostWithPort parses provided url and returns string formated as -// host:port even if port was not specifically specified in the origin url. -// If port is not specified, standart port corresponding to url schema is provided. 
-// example: for url https://example.com function will return "example.com:443" -// for url https://example.com:8443 function will return "example:8443" -func GetHostWithPort(inputURL string) (string, error) { - u, err := url.Parse(inputURL) - if err != nil { - return "", errors.Wrapf(err, "error while getting port for url %s ", inputURL) - } - - port := u.Port() - address := u.Host - // if port is not specified try to detect it based on provided scheme - if port == "" { - portInt, err := net.LookupPort("tcp", u.Scheme) - if err != nil { - return "", errors.Wrapf(err, "error while getting port for url %s ", inputURL) - } - port = strconv.Itoa(portInt) - address = fmt.Sprintf("%s:%s", u.Host, port) - } - return address, nil -} - -// GetIgnoreRulesFromDirectory reads the .odoignore file, if present, and reads the rules from it -// if the .odoignore file is not found, then .gitignore is searched for the rules -// if both are not found, return empty array -// directory is the name of the directory to look into for either of the files -// rules is the array of rules (in string form) -func GetIgnoreRulesFromDirectory(directory string) ([]string, error) { - rules := []string{".git"} - // checking for presence of .odoignore file - pathIgnore := filepath.Join(directory, ".odoignore") - if _, err := os.Stat(pathIgnore); os.IsNotExist(err) || err != nil { - // .odoignore doesn't exist - // checking presence of .gitignore file - pathIgnore = filepath.Join(directory, ".gitignore") - if _, err := os.Stat(pathIgnore); os.IsNotExist(err) || err != nil { - // both doesn't exist, return empty array - return rules, nil - } - } - - file, err := os.Open(filepath.Clean(pathIgnore)) - if err != nil { - return nil, err - } - - defer file.Close() // #nosec G307 - - scanner := bufio.NewReader(file) - for { - line, _, err := scanner.ReadLine() - if err != nil { - if err == io.EOF { - break - } - - return rules, err - } - spaceTrimmedLine := strings.TrimSpace(string(line)) - if len(spaceTrimmedLine) > 0 && !strings.HasPrefix(string(line), "#") && !strings.HasPrefix(string(line), ".git") { - rules = append(rules, string(line)) - } - } - - return rules, nil -} - -// GetAbsGlobExps converts the relative glob expressions into absolute glob expressions -// returns the absolute glob expressions -func GetAbsGlobExps(directory string, globExps []string) []string { - absGlobExps := []string{} - for _, globExp := range globExps { - // for glob matching with the library - // the relative paths in the glob expressions need to be converted to absolute paths - absGlobExps = append(absGlobExps, filepath.Join(directory, globExp)) - } - return absGlobExps -} - -// GetSortedKeys retrieves the alphabetically-sorted keys of the specified map -func GetSortedKeys(mapping map[string]string) []string { - keys := make([]string, len(mapping)) - - i := 0 - for k := range mapping { - keys[i] = k - i++ - } - - sort.Strings(keys) - - return keys -} - -// GetSplitValuesFromStr returns a slice containing the split string, using ',' as a separator -func GetSplitValuesFromStr(inputStr string) []string { - if len(inputStr) == 0 { - return []string{} - } - - result := strings.Split(inputStr, ",") - for i, value := range result { - result[i] = strings.TrimSpace(value) - } - return result -} - -// GetContainerPortsFromStrings generates ContainerPort values from the array of string port values -// ports is the array containing the string port values -func GetContainerPortsFromStrings(ports []string) ([]corev1.ContainerPort, error) { - var containerPorts 
[]corev1.ContainerPort - for _, port := range ports { - splits := strings.Split(port, "/") - if len(splits) < 1 || len(splits) > 2 { - return nil, fmt.Errorf("unable to parse the port string %s", port) - } - - portNumberI64, err := strconv.ParseInt(splits[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("invalid port number %s", splits[0]) - } - portNumber := int32(portNumberI64) - - var portProto corev1.Protocol - if len(splits) == 2 { - switch strings.ToUpper(splits[1]) { - case "TCP": - portProto = corev1.ProtocolTCP - case "UDP": - portProto = corev1.ProtocolUDP - default: - return nil, fmt.Errorf("invalid port protocol %s", splits[1]) - } - } else { - portProto = corev1.ProtocolTCP - } - - port := corev1.ContainerPort{ - Name: fmt.Sprintf("%d-%s", portNumber, strings.ToLower(string(portProto))), - ContainerPort: portNumber, - Protocol: portProto, - } - containerPorts = append(containerPorts, port) - } - return containerPorts, nil -} - -// IsGlobExpMatch compiles strToMatch against each of the passed globExps -// Parameters: -// strToMatch : a string for matching against the rules -// globExps : a list of glob patterns to match strToMatch with -// Returns: true if there is any match else false the error (if any) -// Notes: -// Source as well as glob expression to match is changed to forward -// slashes due to supporting Windows as well as support with the -// "github.com/gobwas/glob" library that we use. -func IsGlobExpMatch(strToMatch string, globExps []string) (bool, error) { - - // Replace all backslashes with forward slashes in order for - // glob / expression matching to work correctly with - // the "github.com/gobwas/glob" library - strToMatch = strings.Replace(strToMatch, "\\", "/", -1) - - for _, globExp := range globExps { - - // We replace backslashes with forward slashes for - // glob expression / matching support - globExp = strings.Replace(globExp, "\\", "/", -1) - - pattern, err := glob.Compile(globExp) - if err != nil { - return false, err - } - matched := pattern.Match(strToMatch) - if matched { - klog.V(4).Infof("ignoring path %s because of glob rule %s", strToMatch, globExp) - return true, nil - } - } - return false, nil -} - -// CheckOutputFlag returns true if specified output format is supported -func CheckOutputFlag(outputFlag string) bool { - if outputFlag == "json" || outputFlag == "" { - return true - } - return false -} - -// RemoveDuplicates goes through a string slice and removes all duplicates. 
-// Reference: https://siongui.github.io/2018/04/14/go-remove-duplicates-from-slice-or-array/ -func RemoveDuplicates(s []string) []string { - - // Make a map and go through each value to see if it's a duplicate or not - m := make(map[string]bool) - for _, item := range s { - if _, ok := m[item]; !ok { - m[item] = true - } - } - - // Append to the unique string - var result []string - for item := range m { - result = append(result, item) - } - return result -} - -// RemoveRelativePathFromFiles removes a specified path from a list of files -func RemoveRelativePathFromFiles(files []string, path string) ([]string, error) { - - removedRelativePathFiles := []string{} - for _, file := range files { - rel, err := filepath.Rel(path, file) - if err != nil { - return []string{}, err - } - removedRelativePathFiles = append(removedRelativePathFiles, rel) - } - - return removedRelativePathFiles, nil -} - -// DeletePath deletes a file/directory if it exists and doesn't throw error if it doesn't exist -func DeletePath(path string) error { - _, err := os.Stat(path) - - // reason for double negative is os.IsExist() would be blind to EMPTY FILE. - if !os.IsNotExist(err) { - return os.Remove(path) - } - return nil -} - -// HTTPGetFreePort gets a free port from the system -func HTTPGetFreePort() (int, error) { - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - return -1, err - } - freePort := listener.Addr().(*net.TCPAddr).Port - err = listener.Close() - if err != nil { - return -1, err - } - return freePort, nil -} - -// IsEmpty checks to see if a directory is empty -// shamelessly taken from: https://stackoverflow.com/questions/30697324/how-to-check-if-directory-on-path-is-empty -// this helps detect any edge cases where an empty directory is copied over -func IsEmpty(name string) (bool, error) { - f, err := os.Open(filepath.Clean(name)) - if err != nil { - return false, err - } - defer f.Close() // #nosec G307 - - _, err = f.Readdirnames(1) // Or f.Readdir(1) - if err == io.EOF { - return true, nil - } - return false, err // Either not empty or error, suits both cases -} - -// GetRemoteFilesMarkedForDeletion returns the list of remote files marked for deletion -func GetRemoteFilesMarkedForDeletion(delSrcRelPaths []string, remoteFolder string) []string { - var rmPaths []string - for _, delRelPath := range delSrcRelPaths { - // since the paths inside the container are linux oriented - // so we convert the paths accordingly - rmPaths = append(rmPaths, filepath.ToSlash(filepath.Join(remoteFolder, delRelPath))) - } - return rmPaths -} - -// HTTPGetRequest gets resource contents given URL and token (if applicable) -// cacheFor determines how long the response should be cached (in minutes), 0 for no caching -func HTTPGetRequest(request HTTPRequestParams, cacheFor int) ([]byte, error) { - // Build http request - req, err := http.NewRequest("GET", request.URL, nil) - if err != nil { - return nil, err - } - if request.Token != "" { - bearer := "Bearer " + request.Token - req.Header.Add("Authorization", bearer) - } - - //add the telemetry client name - req.Header.Add("Client", request.TelemetryClientName) - - overriddenTimeout := HTTPRequestResponseTimeout - timeout := request.Timeout - if timeout != nil { - //if value is invalid, the default will be used - if *timeout > 0 { - //convert timeout to seconds - overriddenTimeout = time.Duration(*timeout) * time.Second - klog.V(4).Infof("HTTP request and response timeout overridden value is %v ", overriddenTimeout) - } else { - klog.V(4).Infof("Invalid 
httpTimeout is passed in, using default value") - } - - } - - httpClient := &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - ResponseHeaderTimeout: overriddenTimeout, - }, - Timeout: overriddenTimeout, - } - - klog.V(4).Infof("HTTPGetRequest: %s", req.URL.String()) - - if cacheFor > 0 { - // if there is an error during cache setup we show warning and continue without using cache - cacheError := false - httpCacheTime := time.Duration(cacheFor) * time.Minute - - // make sure that cache directory exists - err = os.MkdirAll(httpCacheDir, 0750) - if err != nil { - cacheError = true - klog.WarningDepth(4, "Unable to setup cache: ", err) - } - err = cleanHttpCache(httpCacheDir, httpCacheTime) - if err != nil { - cacheError = true - klog.WarningDepth(4, "Unable to clean up cache directory: ", err) - } - - if !cacheError { - httpClient.Transport = httpcache.NewTransport(diskcache.New(httpCacheDir)) - klog.V(4).Infof("Response will be cached in %s for %s", httpCacheDir, httpCacheTime) - } else { - klog.V(4).Info("Response won't be cached.") - } - } - - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.Header.Get(httpcache.XFromCache) != "" { - klog.V(4).Infof("Cached response used.") - } - - // We have a non 1xx / 2xx status, return an error - if (resp.StatusCode - 300) > 0 { - return nil, errors.Errorf("failed to retrieve %s, %v: %s", request.URL, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - - // Process http response - bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - return bytes, err -} - -// FilterIgnores applies the glob rules on the filesChanged and filesDeleted and filters them -// returns the filtered results which match any of the glob rules -func FilterIgnores(filesChanged, filesDeleted, absIgnoreRules []string) (filesChangedFiltered, filesDeletedFiltered []string) { - for _, file := range filesChanged { - match, err := IsGlobExpMatch(file, absIgnoreRules) - if err != nil { - continue - } - if !match { - filesChangedFiltered = append(filesChangedFiltered, file) - } - } - - for _, file := range filesDeleted { - match, err := IsGlobExpMatch(file, absIgnoreRules) - if err != nil { - continue - } - if !match { - filesDeletedFiltered = append(filesDeletedFiltered, file) - } - } - return filesChangedFiltered, filesDeletedFiltered -} - -// IsValidProjectDir checks that the folder to download the project from devfile is -// either empty or only contains the devfile used. -func IsValidProjectDir(path string, devfilePath string) error { - files, err := ioutil.ReadDir(path) - if err != nil { - return err - } - - if len(files) > 1 { - return errors.Errorf("Folder %s is not empty. It can only contain the devfile used.", path) - } else if len(files) == 1 { - file := files[0] - if file.IsDir() { - return errors.Errorf("Folder %s is not empty. 
It contains a subfolder.", path) - } - fileName := files[0].Name() - devfilePath = strings.TrimPrefix(devfilePath, "./") - if fileName != devfilePath { - return errors.Errorf("Folder %s contains one element and it's not the devfile used.", path) - } - } - - return nil -} - -// Converts Git ssh remote to https -func ConvertGitSSHRemoteToHTTPS(remote string) string { - remote = strings.Replace(remote, ":", "/", 1) - remote = strings.Replace(remote, "git@", "https://", 1) - return remote -} - -// GetAndExtractZip downloads a zip file from a URL with a http prefix or -// takes an absolute path prefixed with file:// and extracts it to a destination. -// pathToUnzip specifies the path within the zip folder to extract -func GetAndExtractZip(zipURL string, destination string, pathToUnzip string) error { - if zipURL == "" { - return errors.Errorf("Empty zip url: %s", zipURL) - } - - var pathToZip string - if strings.HasPrefix(zipURL, "file://") { - pathToZip = strings.TrimPrefix(zipURL, "file:/") - if runtime.GOOS == "windows" { - pathToZip = strings.Replace(pathToZip, "\\", "/", -1) - } - } else if strings.HasPrefix(zipURL, "http://") || strings.HasPrefix(zipURL, "https://") { - // Generate temporary zip file location - time := time.Now().Format(time.RFC3339) - time = strings.Replace(time, ":", "-", -1) // ":" is illegal char in windows - pathToZip = path.Join(os.TempDir(), "_"+time+".zip") - - params := DownloadParams{ - Request: HTTPRequestParams{ - URL: zipURL, - }, - Filepath: pathToZip, - } - err := DownloadFile(params) - if err != nil { - return err - } - - defer func() { - if err := DeletePath(pathToZip); err != nil { - klog.Errorf("Could not delete temporary directory for zip file. Error: %s", err) - } - }() - } else { - return errors.Errorf("Invalid Zip URL: %s . 
Should either be prefixed with file://, http:// or https://", zipURL) - } - - filenames, err := Unzip(pathToZip, destination, pathToUnzip) - if err != nil { - return err - } - - if len(filenames) == 0 { - return errors.New("no files were unzipped, ensure that the project repo is not empty or that sparseCheckoutDir has a valid path") - } - - return nil -} - -// Unzip will decompress a zip archive, moving specified files and folders -// within the zip file (parameter 1) to an output directory (parameter 2) -// Source: https://golangcode.com/unzip-files-in-go/ -// pathToUnzip (parameter 3) is the path within the zip folder to extract -func Unzip(src, dest, pathToUnzip string) ([]string, error) { - var filenames []string - - r, err := zip.OpenReader(src) - if err != nil { - return filenames, err - } - defer r.Close() - - // change path separator to correct character - pathToUnzip = filepath.FromSlash(pathToUnzip) - - // removes first slash of pathToUnzip if present - pathToUnzip = strings.TrimPrefix(pathToUnzip, string(os.PathSeparator)) - - for _, f := range r.File { - // Store filename/path for returning and using later on - index := strings.Index(f.Name, "/") - filename := filepath.FromSlash(f.Name[index+1:]) - if filename == "" { - continue - } - - // if sparseCheckoutDir has a pattern - match, err := filepath.Match(pathToUnzip, filename) - if err != nil { - return filenames, err - } - - // destination filepath before trim - fpath := filepath.Join(dest, filename) - - // used for pattern matching - fpathDir := filepath.Dir(fpath) - - // check for prefix or match - if strings.HasPrefix(filename, pathToUnzip) { - filename = strings.TrimPrefix(filename, pathToUnzip) - } else if !strings.HasPrefix(filename, pathToUnzip) && !match && !sliceContainsString(fpathDir, filenames) { - continue - } - // adds trailing slash to destination if needed as filepath.Join removes it - if (len(filename) == 1 && os.IsPathSeparator(filename[0])) || filename == "" { - fpath = dest + string(os.PathSeparator) - } else { - fpath = filepath.Join(dest, filename) - } - // Check for ZipSlip. 
More Info: http://bit.ly/2MsjAWE - if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) { - return filenames, fmt.Errorf("%s: illegal file path", fpath) - } - - filenames = append(filenames, fpath) - - if f.FileInfo().IsDir() { - // Make Folder - if err = os.MkdirAll(fpath, os.ModePerm); err != nil { - return filenames, err - } - continue - } - - // Make File - if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil { - return filenames, err - } - - outFile, err := os.OpenFile(filepath.Clean(fpath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, ModeReadWriteFile) - if err != nil { - return filenames, err - } - - rc, err := f.Open() - if err != nil { - return filenames, err - } - - // limit the number of bytes copied from a file - // This is set to the limit of file size in Github - // which is 100MB - limited := io.LimitReader(rc, 100*1024*1024) - - _, err = io.Copy(outFile, limited) - - // Close the file without defer to close before next iteration of loop - _ = outFile.Close() - _ = rc.Close() - - if err != nil { - return filenames, err - } - } - return filenames, nil -} - -// DownloadFileWithCache downloads the file to the filepath given URL and token (if applicable) -// cacheFor determines how long the response should be cached (in minutes), 0 for no caching -func DownloadFileWithCache(params DownloadParams, cacheFor int) error { - // Get the data - data, err := HTTPGetRequest(params.Request, cacheFor) - if err != nil { - return err - } - - // Create the file - out, err := os.Create(params.Filepath) - if err != nil { - return err - } - defer out.Close() // #nosec G307 - - // Write the data to file - _, err = out.Write(data) - if err != nil { - return err - } - - return nil -} - -// DownloadFile downloads the file to the filepath given URL and token (if applicable) -func DownloadFile(params DownloadParams) error { - return DownloadFileWithCache(params, 0) -} - -// DownloadFileInMemory uses the url to download the file and return bytes -// Deprecated, use DownloadInMemory() instead -func DownloadFileInMemory(url string) ([]byte, error) { - var httpClient = &http.Client{Transport: &http.Transport{ - ResponseHeaderTimeout: HTTPRequestResponseTimeout, - }, Timeout: HTTPRequestResponseTimeout} - resp, err := httpClient.Get(url) - if err != nil { - return nil, err - } - // We have a non 1xx / 2xx status, return an error - if (resp.StatusCode - 300) > 0 { - return nil, errors.Errorf("failed to retrieve %s, %v: %s", url, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - defer resp.Body.Close() - - return ioutil.ReadAll(resp.Body) -} - -// DownloadInMemory uses HTTPRequestParams to download the file and return bytes -func DownloadInMemory(params HTTPRequestParams) ([]byte, error) { - - var httpClient = &http.Client{Transport: &http.Transport{ - ResponseHeaderTimeout: HTTPRequestResponseTimeout, - }, Timeout: HTTPRequestResponseTimeout} - - url := params.URL - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - //add the telemetry client name in the header - req.Header.Add("Client", params.TelemetryClientName) - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - // We have a non 1xx / 2xx status, return an error - if (resp.StatusCode - 300) > 0 { - return nil, errors.Errorf("failed to retrieve %s, %v: %s", url, resp.StatusCode, http.StatusText(resp.StatusCode)) - } - defer resp.Body.Close() - - return ioutil.ReadAll(resp.Body) -} - -// ValidateK8sResourceName sanitizes kubernetes resource name with the 
following requirements: -// - Contain at most 63 characters -// - Contain only lowercase alphanumeric characters or ‘-’ -// - Start with an alphanumeric character -// - End with an alphanumeric character -// - Must not contain all numeric values -func ValidateK8sResourceName(key string, value string) error { - requirements := ` -- Contain at most 63 characters -- Contain only lowercase alphanumeric characters or ‘-’ -- Start with an alphanumeric character -- End with an alphanumeric character -- Must not contain all numeric values - ` - err1 := kvalidation.IsDNS1123Label(value) - _, err2 := strconv.ParseFloat(value, 64) - - if err1 != nil || err2 == nil { - return errors.Errorf("%s \"%s\" is not valid, %s should conform the following requirements: %s", key, value, key, requirements) - } - - return nil -} - -// CheckKubeConfigExist checks for existence of kubeconfig -func CheckKubeConfigExist() bool { - - var kubeconfig string - - if os.Getenv("KUBECONFIG") != "" { - kubeconfig = os.Getenv("KUBECONFIG") - } else { - if home := homedir.HomeDir(); home != "" { - kubeconfig = filepath.Join(home, ".kube", "config") - klog.V(4).Infof("using default kubeconfig path %s", kubeconfig) - } else { - klog.V(4).Infof("no KUBECONFIG provided and cannot fallback to default") - return false - } - } - - if CheckPathExists(kubeconfig) { - return true - } - - return false -} - -// ValidateURL validates the URL -func ValidateURL(sourceURL string) error { - u, err := url.Parse(sourceURL) - if err != nil { - return err - } - - if len(u.Host) == 0 || len(u.Scheme) == 0 { - return errors.New("URL is invalid") - } - - return nil -} - -// ValidateFile validates the file -func ValidateFile(filePath string) error { - // Check if the file path exist - file, err := os.Stat(filePath) - if err != nil { - return err - } - - if file.IsDir() { - return errors.Errorf("%s exists but it's not a file", filePath) - } - - return nil -} - -// GetGitUrlComponentsFromRaw converts a raw GitHub file link to a map of the url components -func GetGitUrlComponentsFromRaw(rawGitURL string) (map[string]string, error) { - var urlComponents map[string]string - - err := ValidateURL(rawGitURL) - if err != nil { - return nil, err - } - - u, _ := url.Parse(rawGitURL) - // the url scheme (e.g. 
https://) is removed before splitting into the 5 components - urlPath := strings.SplitN(u.Host+u.Path, "/", 5) - - // raw GitHub url: https://raw.githubusercontent.com/devfile/registry/main/stacks/nodejs/devfile.yaml - // host: raw.githubusercontent.com - // username: devfile - // project: registry - // branch: main - // file: stacks/nodejs/devfile.yaml - if len(urlPath) == 5 { - urlComponents = map[string]string{ - "host": urlPath[0], - "username": urlPath[1], - "project": urlPath[2], - "branch": urlPath[3], - "file": urlPath[4], - } - } - - return urlComponents, nil -} - -// CloneGitRepo clones a GitHub repo to a destination directory -func CloneGitRepo(gitUrlComponents map[string]string, destDir string) error { - gitUrl := fmt.Sprintf("https://github.com/%s/%s.git", gitUrlComponents["username"], gitUrlComponents["project"]) - branch := fmt.Sprintf("refs/heads/%s", gitUrlComponents["branch"]) - - cloneOptions := &gitpkg.CloneOptions{ - URL: gitUrl, - ReferenceName: plumbing.ReferenceName(branch), - SingleBranch: true, - Depth: 1, - } - - _, err := gitpkg.PlainClone(destDir, false, cloneOptions) - if err != nil { - return err - } - return nil -} - -// CopyFile copies file from source path to destination path -func CopyFile(srcPath string, dstPath string, info os.FileInfo) error { - // In order to avoid file overriding issue, do nothing if source path is equal to destination path - if PathEqual(srcPath, dstPath) { - return nil - } - // Check if the source file path exists - err := ValidateFile(srcPath) - if err != nil { - return err - } - - // Open source file - srcFile, err := os.Open(filepath.Clean(srcPath)) - if err != nil { - return err - } - defer srcFile.Close() // #nosec G307 - - // Create destination file - dstFile, err := os.Create(filepath.Clean(dstPath)) - if err != nil { - return err - } - defer dstFile.Close() // #nosec G307 - - // Ensure destination file has the same file mode with source file - err = os.Chmod(dstFile.Name(), info.Mode()) - if err != nil { - return err - } - - // Copy file - _, err = io.Copy(dstFile, srcFile) - if err != nil { - return err - } - - return nil -} - -// CopyAllDirFiles recursively copies a source directory to a destination directory -func CopyAllDirFiles(srcDir, destDir string) error { - return copyAllDirFilesOnFS(srcDir, destDir, filesystem.DefaultFs{}) -} - -func copyAllDirFilesOnFS(srcDir, destDir string, fs filesystem.Filesystem) error { - var info os.FileInfo - - files, err := fs.ReadDir(srcDir) - if err != nil { - return errors.Wrapf(err, "failed reading dir %v", srcDir) - } - - for _, file := range files { - srcPath := path.Join(srcDir, file.Name()) - destPath := path.Join(destDir, file.Name()) - - if file.IsDir() { - if info, err = fs.Stat(srcPath); err != nil { - return err - } - if err = fs.MkdirAll(destPath, info.Mode()); err != nil { - return err - } - if err = copyAllDirFilesOnFS(srcPath, destPath, fs); err != nil { - return err - } - } else { - if file.Name() == "devfile.yaml" { - continue - } - // Only copy files that do not exist in the destination directory - if !checkPathExistsOnFS(destPath, fs) { - if err := copyFileOnFs(srcPath, destPath, fs); err != nil { - return errors.Wrapf(err, "failed to copy %s to %s", srcPath, destPath) - } - } - } - } - return nil -} - -// copied from: https://github.com/devfile/registry-support/blob/main/index/generator/library/util.go -func copyFileOnFs(src, dst string, fs filesystem.Filesystem) error { - var err error - var srcinfo os.FileInfo - - srcfd, err := fs.Open(src) - if err != nil { - 
return err - } - defer func() { - if e := srcfd.Close(); e != nil { - fmt.Printf("err occurred while closing file: %v", e) - } - }() - - dstfd, err := fs.Create(dst) - if err != nil { - return err - } - defer func() { - if e := dstfd.Close(); e != nil { - fmt.Printf("err occurred while closing file: %v", e) - } - }() - - if _, err = io.Copy(dstfd, srcfd); err != nil { - return err - } - if srcinfo, err = fs.Stat(src); err != nil { - return err - } - return fs.Chmod(dst, srcinfo.Mode()) -} - -// PathEqual compare the paths to determine if they are equal -func PathEqual(firstPath string, secondPath string) bool { - firstAbsPath, _ := GetAbsPath(firstPath) - secondAbsPath, _ := GetAbsPath(secondPath) - return firstAbsPath == secondAbsPath -} - -// sliceContainsString checks for existence of given string in given slice -func sliceContainsString(str string, slice []string) bool { - for _, b := range slice { - if b == str { - return true - } - } - return false -} - -// AddFileToIgnoreFile adds a file to the gitignore file. It only does that if the file doesn't exist -func AddFileToIgnoreFile(gitIgnoreFile, filename string) error { - return addFileToIgnoreFile(gitIgnoreFile, filename, filesystem.DefaultFs{}) -} - -func addFileToIgnoreFile(gitIgnoreFile, filename string, fs filesystem.Filesystem) error { - var data []byte - file, err := fs.OpenFile(gitIgnoreFile, os.O_APPEND|os.O_RDWR, ModeReadWriteFile) - if err != nil { - return errors.Wrap(err, "failed to open .gitignore file") - } - defer file.Close() - - if data, err = fs.ReadFile(gitIgnoreFile); err != nil { - return errors.Wrap(err, fmt.Sprintf("failed reading data from %v file", gitIgnoreFile)) - } - // check whether .odo/odo-file-index.json is already in the .gitignore file - if !strings.Contains(string(data), filename) { - if _, err := file.WriteString("\n" + filename); err != nil { - return errors.Wrapf(err, "failed to add %v to %v file", filepath.Base(filename), gitIgnoreFile) - } - } - return nil -} - -// DisplayLog displays logs to user stdout with some color formatting -func DisplayLog(followLog bool, rd io.ReadCloser, compName string) (err error) { - - defer rd.Close() - - // Copy to stdout (in yellow) - color.Set(color.FgYellow) - defer color.Unset() - - // If we are going to followLog, we'll be copying it to stdout - // else, we copy it to a buffer - if followLog { - - c := make(chan os.Signal) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - go func() { - <-c - color.Unset() - os.Exit(1) - }() - - if _, err = io.Copy(os.Stdout, rd); err != nil { - return errors.Wrapf(err, "error followLoging logs for %s", compName) - } - - } else { - - // Copy to buffer (we aren't going to be followLoging the logs..) - buf := new(bytes.Buffer) - _, err = io.Copy(buf, rd) - if err != nil { - return errors.Wrapf(err, "unable to copy followLog to buffer") - } - - // Copy to stdout - if _, err = io.Copy(os.Stdout, buf); err != nil { - return errors.Wrapf(err, "error copying logs to stdout") - } - - } - return - -} diff --git a/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/LICENSE b/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go b/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go deleted file mode 100644 index f003c452e43..00000000000 --- a/ui/wasm/vendor/github.com/devfile/registry-support/index/generator/schema/schema.go +++ /dev/null @@ -1,215 +0,0 @@ -// -// Copyright 2022 Red Hat, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package schema - -import ( - apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" -) - -/* -Sample index file: -[ - { - "name": "java-maven", - "displayName": "Maven Java", - "description": "Upstream Maven and OpenJDK 11", - "type": "stack", - "tags": [ - "Java", - "Maven" - ], - "architectures": [ - "amd64", - "arm64", - "s390x" - ], - "projectType": "maven", - "language": "java", - "versions": [ - { - "version": "1.1.0", - "schemaVersion": "2.1.0", - "default": true, - "description": "Upstream Maven and OpenJDK 11", - "tags": [ - "Java", - "Maven" - ], - "architectures": [ - "amd64", - "arm64", - "s390x" - ], - "links": { - "self": "devfile-catalog/java-maven:1.1.0" - }, - "resources": [ - "devfile.yaml" - ], - "starterProjects": [ - "springbootproject" - ] - } - ] - }, - { - "name": "java-quarkus", - "displayName": "Quarkus Java", - "description": "Quarkus with Java", - "type": "stack", - "tags": [ - "Java", - "Quarkus" - ], - "architectures": [ - "amd64" - ], - "projectType": "quarkus", - "language": "java", - "versions": [ - { - "version": "1.1.0", - "schemaVersion": "2.0.0", - "default": true, - "description": "Quarkus with Java", - "tags": [ - "Java", - "Quarkus" - ], - "architectures": [ - "amd64" - ], - "links": { - "self": "devfile-catalog/java-quarkus:1.1.0" - }, - "resources": [ - "devfile.yaml" - ], - "starterProjects": [ - "community", - "redhat-product" - ] - } - ] - } -] -*/ - -/* -Index file schema definition -name: string - The stack name -version: string - The stack version -attributes: map[string]apiext.JSON - Map of implementation-dependant free-form YAML attributes -displayName: string - The display name of devfile -description: string - The description of devfile -type: DevfileType - The type of the devfile, currently supports stack and sample -tags: string[] - The tags associated to devfile -icon: string - The devfile icon -globalMemoryLimit: string - The devfile global memory limit -projectType: string - The project framework that is used in the devfile -language: string - The project language that is used in the devfile -links: map[string]string - Links related to the devfile -resources: []string - The file resources that compose a devfile stack. 
-starterProjects: string[] - The project templates that can be used in the devfile -git: *git - The information of remote repositories -provider: string - The devfile provider information -versions: []Version - The list of stack versions information -*/ - -// Schema is the index file schema -type Schema struct { - Name string `yaml:"name,omitempty" json:"name,omitempty"` - Version string `yaml:"version,omitempty" json:"version,omitempty"` - Attributes map[string]apiext.JSON `yaml:"attributes,omitempty" json:"attributes,omitempty"` - DisplayName string `yaml:"displayName,omitempty" json:"displayName,omitempty"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` - Type DevfileType `yaml:"type,omitempty" json:"type,omitempty"` - Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` - Architectures []string `yaml:"architectures,omitempty" json:"architectures,omitempty"` - Icon string `yaml:"icon,omitempty" json:"icon,omitempty"` - GlobalMemoryLimit string `yaml:"globalMemoryLimit,omitempty" json:"globalMemoryLimit,omitempty"` - ProjectType string `yaml:"projectType,omitempty" json:"projectType,omitempty"` - Language string `yaml:"language,omitempty" json:"language,omitempty"` - Links map[string]string `yaml:"links,omitempty" json:"links,omitempty"` - Resources []string `yaml:"resources,omitempty" json:"resources,omitempty"` - StarterProjects []string `yaml:"starterProjects,omitempty" json:"starterProjects,omitempty"` - Git *Git `yaml:"git,omitempty" json:"git,omitempty"` - Provider string `yaml:"provider,omitempty" json:"provider,omitempty"` - SupportUrl string `yaml:"supportUrl,omitempty" json:"supportUrl,omitempty"` - Versions []Version `yaml:"versions,omitempty" json:"versions,omitempty"` -} - -// DevfileType describes the type of devfile -type DevfileType string - -const ( - // SampleDevfileType represents a sample devfile - SampleDevfileType DevfileType = "sample" - - // StackDevfileType represents a stack devfile - StackDevfileType DevfileType = "stack" -) - -// StarterProject is the devfile starter project -type StarterProject struct { - Name string `yaml:"name,omitempty" json:"name,omitempty"` -} - -// Devfile is the devfile structure that is used by index component -type Devfile struct { - Meta Schema `yaml:"metadata,omitempty" json:"metadata,omitempty"` - StarterProjects []StarterProject `yaml:"starterProjects,omitempty" json:"starterProjects,omitempty"` - SchemaVersion string `yaml:"schemaVersion,omitempty" json:"schemaVersion,omitempty"` -} - -// Git stores the information of remote repositories -type Git struct { - Remotes map[string]string `yaml:"remotes,omitempty" json:"remotes,omitempty"` - Url string `yaml:"url,omitempty" json:"url,omitempty"` - RemoteName string `yaml:"remoteName,omitempty" json:"remoteName,omitempty"` - SubDir string `yaml:"subDir,omitempty" json:"subDir,omitempty"` - Revision string `yaml:"revision,omitempty" json:"revision,omitempty"` -} - -// ExtraDevfileEntries is the extraDevfileEntries structure that is used by index component -type ExtraDevfileEntries struct { - Samples []Schema `yaml:"samples,omitempty" json:"samples,omitempty"` - Stacks []Schema `yaml:"stacks,omitempty" json:"stacks,omitempty"` -} - -// Version stores the top-level stack information defined within stack.yaml -type StackInfo struct { - Name string `yaml:"name,omitempty" json:"name,omitempty"` - DisplayName string `yaml:"displayName,omitempty" json:"displayName,omitempty"` - Description string `yaml:"description,omitempty" 
json:"description,omitempty"` - Icon string `yaml:"icon,omitempty" json:"icon,omitempty"` - Versions []Version `yaml:"versions,omitempty" json:"versions,omitempty"` -} - -// Version stores the information for each stack version -type Version struct { - Version string `yaml:"version,omitempty" json:"version,omitempty"` - SchemaVersion string `yaml:"schemaVersion,omitempty" json:"schemaVersion,omitempty"` - Default bool `yaml:"default,omitempty" json:"default,omitempty"` - Git *Git `yaml:"git,omitempty" json:"git,omitempty"` - Description string `yaml:"description,omitempty" json:"description,omitempty"` - Tags []string `yaml:"tags,omitempty" json:"tags,omitempty"` - Architectures []string `yaml:"architectures,omitempty" json:"architectures,omitempty"` - Icon string `yaml:"icon,omitempty" json:"icon,omitempty"` - Links map[string]string `yaml:"links,omitempty" json:"links,omitempty"` - Resources []string `yaml:"resources,omitempty" json:"resources,omitempty"` - StarterProjects []string `yaml:"starterProjects,omitempty" json:"starterProjects,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/LICENSE b/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/LICENSE deleted file mode 100644 index 261eeb9e9f8..00000000000 --- a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/library.go b/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/library.go deleted file mode 100644 index d235f76fd01..00000000000 --- a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/library.go +++ /dev/null @@ -1,571 +0,0 @@ -/* Copyright 2020-2022 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package library - -import ( - "archive/zip" - "encoding/json" - "fmt" - "github.com/hashicorp/go-multierror" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "reflect" - "strings" - "text/tabwriter" - "time" - - orasctx "oras.land/oras-go/pkg/context" - - "github.com/containerd/containerd/remotes/docker" - indexSchema "github.com/devfile/registry-support/index/generator/schema" - versionpkg "github.com/hashicorp/go-version" - "oras.land/oras-go/pkg/content" - "oras.land/oras-go/pkg/oras" -) - -const ( - // Supported Devfile media types - DevfileMediaType = "application/vnd.devfileio.devfile.layer.v1" - DevfileVSXMediaType = "application/vnd.devfileio.vsx.layer.v1.tar" - DevfileSVGLogoMediaType = "image/svg+xml" - DevfilePNGLogoMediaType = "image/png" - DevfileArchiveMediaType = "application/x-tar" - - OwnersFile = "OWNERS" - - httpRequestResponseTimeout = 30 * time.Second // httpRequestTimeout configures timeout of all HTTP requests -) - -var ( - DevfileMediaTypeList = []string{DevfileMediaType} - DevfileAllMediaTypesList = []string{DevfileMediaType, DevfilePNGLogoMediaType, DevfileSVGLogoMediaType, DevfileVSXMediaType, DevfileArchiveMediaType} - ExcludedFiles = []string{OwnersFile} -) - -type Registry struct { - registryURL string - registryContents []indexSchema.Schema - err error -} - -//TelemetryData structure to pass in client telemetry information -// The User and Locale fields should be passed in by clients if telemetry opt-in is enabled -// the generic Client name will be passed in regardless of opt-in/out choice. The value -// will be assigned to the UserId field for opt-outs -type TelemetryData struct { - // User is a generated UUID or generic client name - User string - // Locale is the OS or browser locale - Locale string - //Client is a generic name that describes the client - Client string -} - -type RegistryOptions struct { - // SkipTLSVerify is false by default which is the recommended setting for a devfile registry deployed in production. SkipTLSVerify should only be set to true - // if you are testing a devfile registry or proxy server that is set up with self-signed certificates in a pre-production environment. - SkipTLSVerify bool - // Telemetry allows clients to send telemetry data to the community Devfile Registry - Telemetry TelemetryData - // Filter allows clients to specify which architectures they want to filter their devfiles on - Filter RegistryFilter - // NewIndexSchema is false by default, which calls GET /index and returns index of default version of each stack using the old index schema struct. - // If specified to true, calls GET /v2index and returns the new Index schema with multi-version support - NewIndexSchema bool - // HTTPTimeout overrides the request and response timeout values for the custom HTTP clients set by the registry library. If unset or a negative value is specified, the default timeout of 30s will be used. - HTTPTimeout *int -} - -type RegistryFilter struct { - Architectures []string - // MinSchemaVersion is set to filter devfile index equal and above a particular devfile schema version (inclusive) - // only major version and minor version are required. e.g. 2.1, 2.2 ect. service version should not be provided. - // will only be applied if `NewIndexSchema=true` - MinSchemaVersion string - // MaxSchemaVersion is set to filter devfile index equal and below a particular devfile schema version (inclusive) - // only major version and minor version are required. e.g. 2.1, 2.2 ect. 
service version should not be provided. - // will only be applied if `NewIndexSchema=true` - MaxSchemaVersion string -} - -// GetRegistryIndex returns the list of index schema structured stacks and/or samples from a specified devfile registry. -func GetRegistryIndex(registryURL string, options RegistryOptions, devfileTypes ...indexSchema.DevfileType) ([]indexSchema.Schema, error) { - var registryIndex []indexSchema.Schema - - // Call index server REST API to get the index - urlObj, err := url.Parse(registryURL) - if err != nil { - return nil, err - } - getStack := false - getSample := false - for _, devfileType := range devfileTypes { - if devfileType == indexSchema.StackDevfileType { - getStack = true - } else if devfileType == indexSchema.SampleDevfileType { - getSample = true - } - } - - var endpoint string - indexEndpoint := "index" - if options.NewIndexSchema { - indexEndpoint = "v2index" - } - if getStack && getSample { - endpoint = path.Join(indexEndpoint, "all") - } else if getStack && !getSample { - endpoint = indexEndpoint - } else if getSample && !getStack { - endpoint = path.Join(indexEndpoint, "sample") - } else { - return registryIndex, nil - } - - endpointURL, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - urlObj = urlObj.ResolveReference(endpointURL) - - if !reflect.DeepEqual(options.Filter, RegistryFilter{}) { - q := urlObj.Query() - if len(options.Filter.Architectures) > 0 { - for _, arch := range options.Filter.Architectures { - q.Add("arch", arch) - } - } - - if options.NewIndexSchema && (options.Filter.MaxSchemaVersion != "" || options.Filter.MinSchemaVersion != "") { - if options.Filter.MinSchemaVersion != "" { - q.Add("minSchemaVersion", options.Filter.MinSchemaVersion) - } - if options.Filter.MaxSchemaVersion != "" { - q.Add("maxSchemaVersion", options.Filter.MaxSchemaVersion) - } - } - urlObj.RawQuery = q.Encode() - } - - url := urlObj.String() - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - setHeaders(&req.Header, options) - - httpClient := getHTTPClient(options) - - resp, err := httpClient.Do(req) - if err != nil { - return nil, err - } - bytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - err = json.Unmarshal(bytes, ®istryIndex) - if err != nil { - return nil, err - } - return registryIndex, nil -} - -// GetMultipleRegistryIndices returns the list of stacks and/or samples from multiple registries -func GetMultipleRegistryIndices(registryURLs []string, options RegistryOptions, devfileTypes ...indexSchema.DevfileType) []Registry { - registryList := make([]Registry, len(registryURLs)) - registryContentsChannel := make(chan []indexSchema.Schema) - errChannel := make(chan error) - - for index, registryURL := range registryURLs { - go func(chan []indexSchema.Schema, chan error) { - registryContents, err := GetRegistryIndex(registryURL, options, devfileTypes...) 
- registryContentsChannel <- registryContents - errChannel <- err - }(registryContentsChannel, errChannel) - registryList[index].registryURL = registryURL - registryList[index].registryContents = <-registryContentsChannel - registryList[index].err = <-errChannel - } - return registryList -} - -// PrintRegistry prints the registry with devfile type -func PrintRegistry(registryURLs string, devfileType string, options RegistryOptions) error { - // Get the registry index - registryURLArray := strings.Split(registryURLs, ",") - var registryList []Registry - - if devfileType == string(indexSchema.StackDevfileType) { - registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.StackDevfileType) - } else if devfileType == string(indexSchema.SampleDevfileType) { - registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.SampleDevfileType) - } else if devfileType == "all" { - registryList = GetMultipleRegistryIndices(registryURLArray, options, indexSchema.StackDevfileType, indexSchema.SampleDevfileType) - } - - w := tabwriter.NewWriter(os.Stdout, 5, 2, 3, ' ', tabwriter.TabIndent) - fmt.Fprintln(w, "Name", "\t", "Description", "\t", "Registry", "\t", "Error", "\t") - for _, devfileRegistry := range registryList { - if devfileRegistry.err != nil { - fmt.Fprintln(w, "NONE", "\t", "NONE", "\t", devfileRegistry.registryURL, devfileRegistry.err.Error(), "\t") - } else { - for _, devfileEntry := range devfileRegistry.registryContents { - fmt.Fprintln(w, devfileEntry.Name, "\t", devfileEntry.Description, "\t", devfileRegistry.registryURL, "\t", "NONE", "\t") - } - } - } - - _ = w.Flush() - return nil -} - -// PullStackByMediaTypesFromRegistry pulls a specified stack with allowed media types from a given registry URL to the destination directory. 
-// OWNERS files present in the registry will be excluded -func PullStackByMediaTypesFromRegistry(registry string, stack string, allowedMediaTypes []string, destDir string, options RegistryOptions) error { - // Get stack link - stackLink, err := GetStackLink(registry, stack, options) - if err != nil { - return err - } - - // Pull stack initialization - ctx := orasctx.Background() - urlObj, err := url.Parse(registry) - if err != nil { - return err - } - plainHTTP := true - if urlObj.Scheme == "https" { - plainHTTP = false - } - httpClient := getHTTPClient(options) - - headers := make(http.Header) - setHeaders(&headers, options) - - resolver := docker.NewResolver(docker.ResolverOptions{Headers: headers, PlainHTTP: plainHTTP, Client: httpClient}) - ref := path.Join(urlObj.Host, stackLink) - fileStore := content.NewFile(destDir) - defer fileStore.Close() - - // Pull stack from registry and save it to disk - _, err = oras.Copy(ctx, resolver, ref, fileStore, ref, oras.WithAllowedMediaTypes(allowedMediaTypes)) - if err != nil { - return fmt.Errorf("failed to pull stack %s from %s with allowed media types %v: %v", stack, ref, allowedMediaTypes, err) - } - - // Decompress archive.tar - archivePath := filepath.Join(destDir, "archive.tar") - if _, err := os.Stat(archivePath); err == nil { - err := decompress(destDir, archivePath, ExcludedFiles) - if err != nil { - return err - } - - err = os.RemoveAll(archivePath) - if err != nil { - return err - } - } - - return nil -} - -// PullStackFromRegistry pulls a specified stack with all devfile supported media types from a registry URL to the destination directory -func PullStackFromRegistry(registry string, stack string, destDir string, options RegistryOptions) error { - return PullStackByMediaTypesFromRegistry(registry, stack, DevfileAllMediaTypesList, destDir, options) -} - -// DownloadStarterProjectAsDir downloads a specified starter project archive and extracts it to given path -func DownloadStarterProjectAsDir(path string, registryURL string, stack string, starterProject string, options RegistryOptions) error { - var err error - - // Create temp path to download archive - archivePath := filepath.Join(os.TempDir(), fmt.Sprintf("%s.zip", starterProject)) - - // Download starter project archive to temp path - if err = DownloadStarterProject(archivePath, registryURL, stack, starterProject, options); err != nil { - return err - } - - // Open archive reader - archive, err := zip.OpenReader(archivePath) - if err != nil { - return fmt.Errorf("error opening downloaded starter project archive: %v", err) - } - defer archive.Close() - - // Extract files from starter project archive to specified directory path - cleanPath := filepath.Clean(path) - for _, file := range archive.File { - filePath := filepath.Join(cleanPath, filepath.Clean(file.Name)) - - // validate extracted filepath - if filePath != file.Name && !strings.HasPrefix(filePath, cleanPath+string(os.PathSeparator)) { - return fmt.Errorf("invalid file path %s", filePath) - } - - // if file is a directory, create it in destination and continue to next file - if file.FileInfo().IsDir() { - if err = os.MkdirAll(filePath, os.ModePerm); err != nil { - return fmt.Errorf("error creating directory %s: %v", filepath.Dir(filePath), err) - } - continue - } - - // ensure parent directory of current file is created in destination - if err = os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil { - return fmt.Errorf("error creating parent directory %s: %v", filepath.Dir(filePath), err) - } - - // open 
destination file - /* #nosec G304 -- filePath is produced using path.Join which cleans the dir path */ - dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode()) - if err != nil { - return fmt.Errorf("error opening destination file at %s: %v", filePath, err) - } - - // open source file in archive - srcFile, err := file.Open() - if err != nil { - return fmt.Errorf("error opening source file %s in archive %s: %v", file.Name, archivePath, err) - } - - // extract source file to destination file - /* #nosec G110 -- starter projects are vetted before they are added to a registry. Their contents can be seen before they are downloaded */ - if _, err = io.Copy(dstFile, srcFile); err != nil { - return fmt.Errorf("error extracting file %s from archive %s to destination at %s: %v", file.Name, archivePath, filePath, err) - } - - err = dstFile.Close() - if err != nil { - return err - } - err = srcFile.Close() - if err != nil { - return err - } - } - - return nil -} - -// DownloadStarterProject downloads a specified starter project archive to a given path -func DownloadStarterProject(path string, registryURL string, stack string, starterProject string, options RegistryOptions) error { - var fileStream *os.File - var returnedErr error - - cleanPath := filepath.Clean(path) - // Download Starter Project archive bytes - bytes, err := DownloadStarterProjectAsBytes(registryURL, stack, starterProject, options) - if err != nil { - return err - } - - // Error if parent directory does not exist - if _, err = os.Stat(filepath.Dir(cleanPath)); os.IsNotExist(err) { - return fmt.Errorf("parent directory '%s' does not exist: %v", filepath.Dir(path), err) - } - - // If file does not exist, create a new one - // Else open existing for overwriting - if _, err = os.Stat(path); os.IsNotExist(err) { - fileStream, err = os.Create(cleanPath) - if err != nil { - return fmt.Errorf("failed to create file '%s': %v", path, err) - } - } else { - fileStream, err = os.OpenFile(cleanPath, os.O_WRONLY|os.O_TRUNC, os.ModePerm) - if err != nil { - return fmt.Errorf("failed to open file '%s': %v", path, err) - } - } - - defer func() { - if err = fileStream.Close(); err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - }() - - // Write downloaded bytes to file - _, err = fileStream.Write(bytes) - if err != nil { - returnedErr = multierror.Append(returnedErr, fmt.Errorf("failed writing to '%s': %v", path, err)) - return returnedErr - } - - return nil -} - -// DownloadStarterProjectAsBytes downloads the file bytes of a specified starter project archive and return these bytes -func DownloadStarterProjectAsBytes(registryURL string, stack string, starterProject string, options RegistryOptions) ([]byte, error) { - stackName, _, err := SplitVersionFromStack(stack) - if err != nil { - return nil, fmt.Errorf("problem in stack/version tag: %v", err) - } - - exists, err := IsStarterProjectExists(registryURL, stack, starterProject, options) - if err != nil { - return nil, err - } else if !exists { - return nil, fmt.Errorf("the starter project '%s' does not exist under the stack '%s'", starterProject, stackName) - } - - urlObj, err := url.Parse(registryURL) - if err != nil { - return nil, err - } - - url := fmt.Sprintf("%s://%s", urlObj.Scheme, path.Join(urlObj.Host, "devfiles", stackName, "starter-projects", starterProject)) - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - - setHeaders(&req.Header, options) - - httpClient := getHTTPClient(options) - resp, err := 
httpClient.Do(req) - if err != nil { - return nil, err - } - - // Return downloaded starter project as bytes or error if unsuccessful. - return ioutil.ReadAll(resp.Body) -} - -// IsStarterProjectExists checks if starter project exists for a given stack -func IsStarterProjectExists(registryURL string, stack string, starterProject string, options RegistryOptions) (bool, error) { - // Get stack index - stackIndex, err := GetStackIndex(registryURL, stack, options) - if err != nil { - return false, err - } - - // Check if starter project exists in the stack index - exists := false - for _, sp := range stackIndex.StarterProjects { - if sp == starterProject { - exists = true - break - } - } - - if !exists && options.NewIndexSchema { - var starterProjects []string - - for _, version := range stackIndex.Versions { - starterProjects = append(starterProjects, version.StarterProjects...) - } - - exists = false - for _, sp := range starterProjects { - if sp == starterProject { - exists = true - break - } - } - - return exists, nil - } else { - return exists, nil - } -} - -// GetStackLink returns the slug needed to pull a specified stack from a registry URL -func GetStackLink(registryURL string, stack string, options RegistryOptions) (string, error) { - var stackLink string - - // Get stack index - stackIndex, err := GetStackIndex(registryURL, stack, options) - if err != nil { - return "", err - } - - // Split version from stack label if specified - stack, requestVersion, err := SplitVersionFromStack(stack) - if err != nil { - return "", fmt.Errorf("problem in stack/version tag: %v", err) - } - - if options.NewIndexSchema { - latestVersionIndex := 0 - latest, err := versionpkg.NewVersion(stackIndex.Versions[latestVersionIndex].Version) - if err != nil { - return "", fmt.Errorf("failed to parse the stack version %s for stack %s", stackIndex.Versions[latestVersionIndex].Version, stack) - } - for index, version := range stackIndex.Versions { - if (requestVersion == "" && version.Default) || (version.Version == requestVersion) { - stackLink = version.Links["self"] - break - } else if requestVersion == "latest" { - current, err := versionpkg.NewVersion(version.Version) - if err != nil { - return "", fmt.Errorf("failed to parse the stack version %s for stack %s", version.Version, stack) - } - if current.GreaterThan(latest) { - latestVersionIndex = index - latest = current - } - } - } - if requestVersion == "latest" { - stackLink = stackIndex.Versions[latestVersionIndex].Links["self"] - } - if requestVersion == "" && stackLink == "" { - return "", fmt.Errorf("no version specified for stack %s which no default version exists in the registry %s", stack, registryURL) - } else if stackLink == "" { - return "", fmt.Errorf("the requested version %s for stack %s does not exist in the registry %s", requestVersion, stack, registryURL) - } - } else { - stackLink = stackIndex.Links["self"] - } - - return stackLink, nil -} - -// GetStackIndex returns the schema index of a specified stack -func GetStackIndex(registryURL string, stack string, options RegistryOptions) (indexSchema.Schema, error) { - // Get the registry index - registryIndex, err := GetRegistryIndex(registryURL, options, indexSchema.StackDevfileType) - if err != nil { - return indexSchema.Schema{}, err - } - - // Prune version from stack label if specified - stack, _, err = SplitVersionFromStack(stack) - if err != nil { - return indexSchema.Schema{}, fmt.Errorf("problem in stack/version tag: %v", err) - } - - // Parse the index to get the specified stack's 
metadata in the index - for _, item := range registryIndex { - // Return index of specified stack if found - if item.Name == stack { - return item, nil - } - } - - // Return error if stack index is not found - return indexSchema.Schema{}, fmt.Errorf("stack %s does not exist in the registry %s", stack, registryURL) -} diff --git a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/util.go b/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/util.go deleted file mode 100644 index eb3fe27fde6..00000000000 --- a/ui/wasm/vendor/github.com/devfile/registry-support/registry-library/library/util.go +++ /dev/null @@ -1,193 +0,0 @@ -/* Copyright 2020-2022 Red Hat, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package library - -import ( - "archive/tar" - "compress/gzip" - "crypto/tls" - "fmt" - "github.com/hashicorp/go-multierror" - "io" - "log" - "net/http" - "os" - "path" - "path/filepath" - "regexp" - "strings" - "time" -) - -// SplitVersionFromStack takes a stack/version tag and splits the stack name from the version -func SplitVersionFromStack(stackWithVersion string) (string, string, error) { - var requestVersion string - var stack string - - if valid, err := ValidateStackVersionTag(stackWithVersion); !valid { - if err != nil { - return "", "", err - } - return "", "", fmt.Errorf("stack/version tag '%s' is malformed, use form '<stack>:<version>' or '<stack>'", - stackWithVersion) - } else if strings.Contains(stackWithVersion, ":") { - pair := strings.Split(stackWithVersion, ":") - if len(pair) != 2 { - return "", "", fmt.Errorf("problem splitting stack/version pair from tag '%s', instead of a pair got a length of %d", - stackWithVersion, len(pair)) - } - - stack = pair[0] - requestVersion = pair[1] - } else { - stack = stackWithVersion - requestVersion = "" - } - - return stack, requestVersion, nil - } - -// ValidateStackVersionTag returns true if stack/version tag is well formed -// and false if it is malformed -func ValidateStackVersionTag(stackWithVersion string) (bool, error) { - const exp = `^[a-z][^:\s]*(:([a-z]|[0-9])[^:\s]*)?$` - r, err := regexp.Compile(exp) - if err != nil { - return false, err - } - - return r.MatchString(stackWithVersion), nil -} - -// decompress extracts the archive file -func decompress(targetDir string, tarFile string, excludeFiles []string) error { - var returnedErr error - - reader, err := os.Open(filepath.Clean(tarFile)) - if err != nil { - return err - } - - defer func() { - if err = reader.Close(); err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - }() - - gzReader, err := gzip.NewReader(reader) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } - - defer func() { - if err = gzReader.Close(); err != nil { - returnedErr = multierror.Append(returnedErr, err) - } - }() - - tarReader := tar.NewReader(gzReader) - for { - header, err := tarReader.Next() - if err == io.EOF { - break - } else if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } -
if isExcluded(header.Name, excludeFiles) { - continue - } - - target := path.Join(targetDir, filepath.Clean(header.Name)) - switch header.Typeflag { - case tar.TypeDir: - err = os.MkdirAll(target, os.FileMode(header.Mode)) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } - case tar.TypeReg: - /* #nosec G304 -- target is produced using path.Join which cleans the dir path */ - w, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode)) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } - /* #nosec G110 -- starter projects are vetted before they are added to a registry. Their contents can be seen before they are downloaded */ - _, err = io.Copy(w, tarReader) - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } - err = w.Close() - if err != nil { - returnedErr = multierror.Append(returnedErr, err) - return returnedErr - } - default: - log.Printf("Unsupported type: %v", header.Typeflag) - } - } - - return nil -} - -func isExcluded(name string, excludeFiles []string) bool { - basename := filepath.Base(name) - for _, excludeFile := range excludeFiles { - if basename == excludeFile { - return true - } - } - return false -} - -//setHeaders sets the request headers -func setHeaders(headers *http.Header, options RegistryOptions) { - t := options.Telemetry - if t.User != "" { - headers.Add("User", t.User) - } - if t.Client != "" { - headers.Add("Client", t.Client) - } - if t.Locale != "" { - headers.Add("Locale", t.Locale) - } -} - -//getHTTPClient returns a new http client object -func getHTTPClient(options RegistryOptions) *http.Client { - - overriddenTimeout := httpRequestResponseTimeout - timeout := options.HTTPTimeout - //if value is invalid or unspecified, the default will be used - if timeout != nil && *timeout > 0 { - //convert timeout to seconds - overriddenTimeout = time.Duration(*timeout) * time.Second - } - - return &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - ResponseHeaderTimeout: overriddenTimeout, - /*#nosec G402 -- documented user option for dev/test, not for prod use */ - TLSClientConfig: &tls.Config{InsecureSkipVerify: options.SkipTLSVerify}, - }, - Timeout: overriddenTimeout, - } -} diff --git a/ui/wasm/vendor/github.com/docker/cli/AUTHORS b/ui/wasm/vendor/github.com/docker/cli/AUTHORS deleted file mode 100644 index 8990f85b56e..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/AUTHORS +++ /dev/null @@ -1,771 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `scripts/docs/generate-authors.sh`. - -Aanand Prasad -Aaron L. 
Xu -Aaron Lehmann -Aaron.L.Xu -Abdur Rehman -Abhinandan Prativadi -Abin Shahab -Abreto FU -Ace Tang -Addam Hardy -Adolfo Ochagavía -Adrian Plata -Adrien Duermael -Adrien Folie -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Akhil Mohan -Akihiro Suda -Akim Demaille -Alan Thompson -Albert Callarisa -Albin Kerouanton -Aleksa Sarai -Aleksander Piotrowski -Alessandro Boch -Alex Mavrogiannis -Alex Mayer -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Ryabov -Alexandre González -Alfred Landrum -Alicia Lauerman -Allen Sun -Alvin Deng -Amen Belayneh -Amir Goldstein -Amit Krishnan -Amit Shukla -Amy Lindburg -Anca Iordache -Anda Xu -Andrea Luzzardi -Andreas Köhler -Andrew France -Andrew Hsu -Andrew Macpherson -Andrew McDonnell -Andrew Po -Andrey Petrov -Andrii Berehuliak -André Martins -Andy Goldstein -Andy Rothfusz -Anil Madhavapeddy -Ankush Agarwal -Anne Henmi -Anton Polonskiy -Antonio Murdaca -Antonis Kalipetis -Anusha Ragunathan -Ao Li -Arash Deshmeh -Arko Dasgupta -Arnaud Porterie -Arthur Peka -Ashwini Oruganti -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Bastiaan Bakker -BastianHofmann -Ben Bonnefoy -Ben Creasy -Ben Firshman -Benjamin Boudreau -Benoit Sigoure -Bhumika Bayani -Bill Wang -Bin Liu -Bingshen Wang -Boaz Shuster -Bogdan Anton -Boris Pruessmann -Bradley Cicenas -Brandon Mitchell -Brandon Philips -Brent Salisbury -Bret Fisher -Brian (bex) Exelbierd -Brian Goff -Brian Wieder -Bryan Bess -Bryan Boreham -Bryan Murphy -bryfry -Cameron Spear -Cao Weiwei -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Faragher -Chao Wang -Charles Chan -Charles Law -Charles Smith -Charlie Drage -ChaYoung You -Chen Chuanliang -Chen Hanxiao -Chen Mingjie -Chen Qiu -Chris Gavin -Chris Gibson -Chris McKinnel -Chris Snow -Chris Weyl -Christian Persson -Christian Stefanescu -Christophe Robin -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Jones -Christy Norman -Chun Chen -Clinton Kitson -Coenraad Loubser -Colin Hebert -Collin Guarino -Colm Hally -Comical Derskeal <27731088+derskeal@users.noreply.github.com> -Corey Farrell -Corey Quon -Craig Wilhite -Cristian Staretu -Daehyeok Mun -Dafydd Crosby -Daisuke Ito -dalanlan -Damien Nadé -Dan Cotora -Daniel Artine -Daniel Cassidy -Daniel Dao -Daniel Farrell -Daniel Gasienica -Daniel Goosen -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Nephin -Daniel Norberg -Daniel Watkins -Daniel Zhang -Daniil Nikolenko -Danny Berger -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Dave Goodchild -Dave Henderson -Dave Tucker -David Beitey -David Calavera -David Cramer -David Dooling -David Gageot -David Lechner -David Scott -David Sheets -David Williamson -David Xia -David Young -Deng Guangxing -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek McGowan -Deshi Xiao -Dharmit Shah -Dhawal Yogesh Bhanushali -Dieter Reuter -Dima Stopel -Dimitry Andric -Ding Fei -Diogo Monica -Djordje Lukic -Dmitry Gusev -Dmitry Smirnov -Dmitry V. Krivenok -Dominik Braun -Don Kjer -Dong Chen -Doug Davis -Drew Erny -Ed Costello -Elango Sivanandam -Eli Uriegas -Eli Uriegas -Elias Faxö -Elliot Luo <956941328@qq.com> -Eric Curtin -Eric G. Noriega -Eric Rosenberg -Eric Sage -Eric-Olivier Lamey -Erica Windisch -Erik Hollensbe -Erik St. Martin -Essam A. 
Hassan -Ethan Haynes -Euan Kemp -Eugene Yakubovich -Evan Allrich -Evan Hazlett -Evan Krall -Evelyn Xu -Everett Toews -Fabio Falci -Fabrizio Soppelsa -Felix Hupfeld -Felix Rabe -Filip Jareš -Flavio Crisciani -Florian Klein -Forest Johnson -Foysal Iqbal -François Scala -Fred Lifton -Frederic Hemberger -Frederick F. Kautz IV -Frederik Nordahl Jul Sabroe -Frieder Bluemle -Gabriel Nicolas Avellaneda -Gaetan de Villele -Gang Qiao -Gary Schaetz -Genki Takiuchi -George MacRorie -George Xie -Gianluca Borello -Gildas Cuisinier -Goksu Toprak -Gou Rao -Grant Reaber -Greg Pflaum -Guilhem Lettron -Guillaume J. Charmes -Guillaume Le Floch -gwx296173 -Günther Jungbluth -Hakan Özler -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harold Cooper -Harry Zhang -He Simei -Hector S -Helen Xie -Henning Sprang -Henry N -Hernan Garcia -Hongbin Lu -Hu Keping -Huayi Zhang -Hugo Gabriel Eyherabide -huqun -Huu Nguyen -Hyzhou Zhy -Ian Campbell -Ian Philpot -Ignacio Capurro -Ilya Dmitrichenko -Ilya Khlopotov -Ilya Sotkov -Ioan Eugen Stan -Isabel Jimenez -Ivan Grcic -Ivan Markin -Jacob Atzen -Jacob Tomlinson -Jaivish Kothari -Jake Lambert -Jake Sanders -James Nesbitt -James Turnbull -Jamie Hannaford -Jan Koprowski -Jan Pazdziora -Jan-Jaap Driessen -Jana Radhakrishnan -Jared Hocutt -Jasmine Hegman -Jason Heiss -Jason Plum -Jay Kamat -Jean Rouge -Jean-Christophe Sirot -Jean-Pierre Huynh -Jeff Lindsay -Jeff Nickoloff -Jeff Silberman -Jeremy Chambers -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jesse Adametz -Jessica Frazelle -Jezeniel Zapanta -Jian Zhang -Jie Luo -Jilles Oldenbeuving -Jim Galasyn -Jimmy Leger -Jimmy Song -jimmyxian -Jintao Zhang -Joao Fernandes -Joe Abbey -Joe Doliner -Joe Gordon -Joel Handwell -Joey Geiger -Joffrey F -Johan Euphrosine -Johannes 'fish' Ziemke -John Feminella -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John Starks -John Stephens -John Tims -John V. Martinez -John Willis -Jon Johnson -Jonatas Baldin -Jonathan Boulle -Jonathan Lee -Jonathan Lomas -Jonathan McCrohan -Jonh Wendell -Jordan Jennings -Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> -Joseph Kern -Josh Bodah -Josh Chorlton -Josh Hawn -Josh Horwitz -Josh Soref -Julien Barbier -Julien Kassar -Julien Maitrehenry -Justas Brazauskas -Justin Cormack -Justin Simonelis -Justyn Temme -Jyrki Puttonen -Jérémie Drouet -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu (Kennan) -Kara Alexandra -Kareem Khazem -Karthik Nayak -Kat Samperi -Kathryn Spiers -Katie McLaughlin -Ke Xu -Kei Ohmura -Keith Hudgins -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickaël Laventure -Kevin Burke -Kevin Feyrer -Kevin Kern -Kevin Kirsche -Kevin Meredith -Kevin Richardson -Kevin Woblick -khaled souf -Kim Eik -Kir Kolyshkin -Kotaro Yoshimatsu -Krasi Georgiev -Kris-Mikael Krister -Kun Zhang -Kunal Kushwaha -Lachlan Cooper -Lai Jiangshan -Lars Kellogg-Stedman -Laura Frank -Laurent Erignoux -Lee Gaines -Lei Jitang -Lennie -Leo Gallucci -Lewis Daly -Li Yi -Li Yi -Liang-Chi Hsieh -Lifubang -Lihua Tang -Lily Guo -Lin Lu -Linus Heckemann -Liping Xue -Liron Levin -liwenqi -lixiaobing10051267 -Lloyd Dewolf -Lorenzo Fontana -Louis Opter -Luca Favatella -Luca Marturana -Lucas Chan -Luka Hartwig -Lukas Heeren -Lukasz Zajaczkowski -Lydell Manganti -Lénaïc Huard -Ma Shimiao -Mabin -Maciej Kalisz -Madhav Puri -Madhu Venugopal -Madhur Batra -Malte Janduda -Manjunath A Kumatagi -Mansi Nahar -mapk0y -Marc Bihlmaier -Marco Mariani -Marco Vedovati -Marcus Martins -Marianna Tessel -Marius Ileana -Marius Sturm -Mark Oates -Marsh Macy -Martin Mosegaard Amdisen -Mary Anthony -Mason Fish -Mason Malone -Mateusz Major -Mathieu Champlon -Matt Gucci -Matt Robenolt -Matteo Orefice -Matthew Heon -Matthieu Hauglustaine -Mauro Porras P -Max Shytikov -Maxime Petazzoni -Mei ChunTao -Micah Zoltu -Michael A. Smith -Michael Bridgen -Michael Crosby -Michael Friis -Michael Irwin -Michael Käufl -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Steinert -Michael West -Michal Minář -Michał Czeraszkiewicz -Miguel Angel Alvarez Cabrerizo -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Casas -Mike Danese -Mike Dillon -Mike Goelzer -Mike MacCana -mikelinjie <294893458@qq.com> -Mikhail Vasin -Milind Chawre -Mindaugas Rukas -Miroslav Gula -Misty Stanley-Jones -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohini Anne Dsouza -Moorthy RS -Morgan Bauer -Morten Hekkvang -Moysés Borges -Mrunal Patel -muicoder -Muthukumar R -Máximo Cuadros -Mårten Cassel -Nace Oroz -Nahum Shalman -Nalin Dahyabhai -Nao YONASHIRO -Nassim 'Nass' Eddequiouaq -Natalie Parker -Nate Brennand -Nathan Hsieh -Nathan LeClaire -Nathan McCauley -Neil Peterson -Nick Adcock -Nico Stapelbroek -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nikhil Chawla -Nikolas Garofil -Nikolay Milovanov -Nir Soffer -Nishant Totla -NIWA Hideyuki -Noah Treuhaft -O.S. 
Tezer -Odin Ugedal -ohmystack -Olle Jonsson -Olli Janatuinen -Oscar Wieman -Otto Kekäläinen -Ovidio Mallo -Pascal Borreli -Patrick Böänziger -Patrick Hemmer -Patrick Lang -Paul -Paul Kehrer -Paul Lietar -Paul Mulders -Paul Weaver -Pavel Pospisil -Paweł Szczekutowicz -Peeyush Gupta -Per Lundberg -Peter Edge -Peter Hsu -Peter Jaffe -Peter Kehl -Peter Nagy -Peter Salvatore -Peter Waller -Phil Estes -Philip Alexander Etling -Philipp Gillé -Philipp Schmied -pidster -pixelistik -Pratik Karki -Prayag Verma -Preston Cowley -Pure White -Qiang Huang -Qinglan Peng -qudongfang -Raghavendra K T -Rahul Zoldyck -Ravi Shekhar Jethani -Ray Tsang -Reficul -Remy Suen -Renaud Gaubert -Ricardo N Feliciano -Rich Moyse -Richard Mathie -Richard Scothern -Rick Wieman -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Gulewich -Robert Wallis -Robin Naundorf -Robin Speekenbrink -Rodolfo Ortiz -Rogelio Canedo -Rohan Verma -Roland Kammerer -Roman Dudin -Rory Hunter -Ross Boucher -Rubens Figueiredo -Rui Cao -Ryan Belgrave -Ryan Detzel -Ryan Stelly -Ryan Wilson-Perkin -Ryan Zhang -Sainath Grandhi -Sakeven Jiang -Sally O'Malley -Sam Neirinck -Samarth Shah -Sambuddha Basu -Sami Tabet -Samuel Cochran -Samuel Karp -Santhosh Manohar -Sargun Dhillon -Saswat Bhattacharya -Scott Brenner -Scott Collier -Sean Christopherson -Sean Rodman -Sebastiaan van Stijn -Sergey Tryuber -Serhat Gülçiçek -Sevki Hasirci -Shaun Kaasten -Sheng Yang -Shijiang Wei -Shishir Mahajan -Shoubhik Bose -Shukui Yang -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silvin Lubecki -Simei He -Simon Ferquel -Simon Heimberg -Sindhu S -Slava Semushin -Solomon Hykes -Song Gao -Spencer Brown -squeegels <1674195+squeegels@users.noreply.github.com> -Srini Brahmaroutu -Stefan S. -Stefan Scherer -Stefan Weil -Stephane Jeandeaux -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Richards -Steven Burgess -Subhajit Ghosh -Sun Jianbo -Sune Keller -Sungwon Han -Sunny Gogoi -Sven Dowideit -Sylvain Baubeau -Sébastien HOUZÉ -T K Sourabh -TAGOMORI Satoshi -taiji-tech -Taylor Jones -Tejaswini Duggaraju -Tengfei Wang -Teppei Fukuda -Thatcher Peskens -Thibault Coupin -Thomas Gazagnaire -Thomas Krzero -Thomas Leonard -Thomas Léveil -Thomas Riccardi -Thomas Swift -Tianon Gravi -Tianyi Wang -Tibor Vass -Tim Dettrick -Tim Hockin -Tim Sampson -Tim Smith -Tim Waugh -Tim Wraight -timfeirg -Timothy Hobbs -Tobias Bradtke -Tobias Gesellchen -Todd Whiteman -Tom Denham -Tom Fotherby -Tom Klingenberg -Tom Milligan -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomáš Hrčka -Tony Abboud -Tõnis Tiigi -Trapier Marshall -Travis Cline -Tristan Carel -Tycho Andersen -Tycho Andersen -uhayate -Ulrich Bareth -Ulysses Souza -Umesh Yadav -Valentin Lorentz -Venkateswara Reddy Bukkasamudram -Veres Lajos -Victor Vieux -Victoria Bialas -Viktor Stanchev -Vimal Raghubir -Vincent Batts -Vincent Bernat -Vincent Demeester -Vincent Woo -Vishnu Kannan -Vivek Goyal -Wang Jie -Wang Lei -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -Wataru Ishida -Wayne Song -Wen Cheng Ma -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -William Henry -Xianglin Gao -Xiaodong Liu -Xiaodong Zhang -Xiaoxi He -Xinbo Weng -Xuecong Liao -Yan Feng -Yanqiang Miao -Yassine Tijani -Yi EungJun -Ying Li -Yong Tang -Yosef Fertel -Yu Peng -Yuan Sun -Yue Zhang -Yunxiang Huang -Zachary Romero -Zander Mackie -zebrilee -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhenghenghuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Álex González -Álvaro Lázaro -Átila Camurça Alves -徐俊杰 diff --git a/ui/wasm/vendor/github.com/docker/cli/LICENSE b/ui/wasm/vendor/github.com/docker/cli/LICENSE deleted file mode 100644 index 9c8e20ab85c..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2017 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/docker/cli/NOTICE b/ui/wasm/vendor/github.com/docker/cli/NOTICE deleted file mode 100644 index 58b19b6d15b..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
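Note: the deletions that follow remove the vendored github.com/docker/cli/cli/config package, which gave the wasm build the standard Docker config-file handling: resolving $DOCKER_CONFIG (or ~/.docker), loading config.json, and mapping registries to credentials. A minimal sketch of how a consumer typically used it, using only signatures visible in the deleted sources below (LoadDefaultConfigFile, Dir, GetAuthConfig) — illustrative only, not part of the diff:

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/docker/cli/cli/config"
    )

    func main() {
    	// Resolve $DOCKER_CONFIG (or ~/.docker) and load config.json;
    	// a missing file yields an empty but usable ConfigFile.
    	cfg := config.LoadDefaultConfigFile(os.Stderr)

    	// Docker Hub credentials are keyed by the legacy index URL
    	// (the defaultIndexServer constant in the deleted file.go).
    	auth, err := cfg.GetAuthConfig("https://index.docker.io/v1/")
    	if err != nil {
    		fmt.Fprintln(os.Stderr, "resolving credentials:", err)
    		os.Exit(1)
    	}

    	// With no credential helper configured, the username/password
    	// come from the file store's base64 "user:pass" auth entry
    	// (decoded by decodeAuth in the deleted configfile package);
    	// helpers, when set, are consulted via GetCredentialsStore.
    	fmt.Printf("config dir %s, user %q\n", config.Dir(), auth.Username)
    }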
diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/config.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/config.go deleted file mode 100644 index 93275f3d985..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/config.go +++ /dev/null @@ -1,163 +0,0 @@ -package config - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/docker/cli/cli/config/configfile" - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" - contextsDir = "contexts" -) - -var ( - initConfigDir = new(sync.Once) - configDir string - homeDir string -) - -// resetHomeDir is used in testing to reset the "homeDir" package variable to -// force re-lookup of the home directory between tests. -func resetHomeDir() { - homeDir = "" -} - -func getHomeDir() string { - if homeDir == "" { - homeDir = homedir.Get() - } - return homeDir -} - -// resetConfigDir is used in testing to reset the "configDir" package variable -// and its sync.Once to force re-lookup between tests. -func resetConfigDir() { - configDir = "" - initConfigDir = new(sync.Once) -} - -func setConfigDir() { - if configDir != "" { - return - } - configDir = os.Getenv("DOCKER_CONFIG") - if configDir == "" { - configDir = filepath.Join(getHomeDir(), configFileDir) - } -} - -// Dir returns the directory the configuration file is stored in -func Dir() string { - initConfigDir.Do(setConfigDir) - return configDir -} - -// ContextStoreDir returns the directory the docker contexts are stored in -func ContextStoreDir() string { - return filepath.Join(Dir(), contextsDir) -} - -// SetDir sets the directory the configuration file is stored in -func SetDir(dir string) { - configDir = filepath.Clean(dir) -} - -// Path returns the path to a file relative to the config dir -func Path(p ...string) (string, error) { - path := filepath.Join(append([]string{Dir()}, p...)...) - if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { - return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) - } - return path, nil -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file -var printLegacyFileWarning bool - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - printLegacyFileWarning = false - - if configDir == "" { - configDir = Dir() - } - - filename := filepath.Join(configDir, ConfigFileName) - configFile := configfile.New(filename) - - // Try happy path first - latest config file - if file, err := os.Open(filename); err == nil { - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = errors.Wrap(err, filename) - } - return configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return configFile, errors.Wrap(err, filename) - } - - // Can't find latest config file so check for the old one - filename = filepath.Join(getHomeDir(), oldConfigfile) - if file, err := os.Open(filename); err == nil { - printLegacyFileWarning = true - defer file.Close() - if err := configFile.LegacyLoadFromReader(file); err != nil { - return configFile, errors.Wrap(err, filename) - } - } - return configFile, nil -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found. -func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { - configFile, err := Load(Dir()) - if err != nil { - fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) - } - if printLegacyFileWarning { - _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") - } - if !configFile.ContainsAuth() { - configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) - } - return configFile -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file.go deleted file mode 100644 index dc9f39eb7e3..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ /dev/null @@ -1,415 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/cli/cli/config/credentials" - "github.com/docker/cli/cli/config/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexServer = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - NetworksFormat string `json:"networksFormat,omitempty"` - PluginsFormat string `json:"pluginsFormat,omitempty"` - VolumesFormat string `json:"volumesFormat,omitempty"` - StatsFormat string `json:"statsFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - CredentialHelpers map[string]string `json:"credHelpers,omitempty"` - Filename string `json:"-"` // Note: for internal use only - ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` - ServicesFormat string `json:"servicesFormat,omitempty"` - TasksFormat string `json:"tasksFormat,omitempty"` - SecretFormat string `json:"secretFormat,omitempty"` - ConfigFormat string `json:"configFormat,omitempty"` - NodesFormat string `json:"nodesFormat,omitempty"` - PruneFilters []string `json:"pruneFilters,omitempty"` - Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` - StackOrchestrator string `json:"stackOrchestrator,omitempty"` - Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` - CurrentContext string `json:"currentContext,omitempty"` - CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` - Plugins map[string]map[string]string `json:"plugins,omitempty"` - Aliases map[string]string `json:"aliases,omitempty"` -} - -// ProxyConfig contains proxy configuration settings -type ProxyConfig struct { - HTTPProxy string `json:"httpProxy,omitempty"` - HTTPSProxy string `json:"httpsProxy,omitempty"` - NoProxy string `json:"noProxy,omitempty"` - FTPProxy string `json:"ftpProxy,omitempty"` -} - -// KubernetesConfig contains Kubernetes orchestrator settings -type KubernetesConfig struct { - AllNamespaces string `json:"allNamespaces,omitempty"` -} - -// New initializes an empty configuration file for the given filename 'fn' -func New(fn string) *ConfigFile { - return &ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - Plugins: make(map[string]map[string]string), - Aliases: make(map[string]string), - } -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return errors.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return errors.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexServer - configFile.AuthConfigs[defaultIndexServer] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - 
authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - if ac.Auth != "" { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return checkKubernetesConfiguration(configFile.Kubernetes) -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. -func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - len(configFile.CredentialHelpers) > 0 || - len(configFile.AuthConfigs) > 0 -} - -// GetAuthConfigs returns the mapping of repo to auth configuration -func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { - return configFile.AuthConfigs -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - // User-Agent header is automatically set, and should not be stored in the configuration - for v := range configFile.HTTPHeaders { - if strings.EqualFold(v, "User-Agent") { - delete(configFile.HTTPHeaders, v) - } - } - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() (retErr error) { - if configFile.Filename == "" { - return errors.Errorf("Can't save config with empty filename") - } - - dir := filepath.Dir(configFile.Filename) - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) - if err != nil { - return err - } - defer func() { - temp.Close() - if retErr != nil { - if err := os.Remove(temp.Name()); err != nil { - logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") - } - } - }() - - err = configFile.SaveToWriter(temp) - if err != nil { - return err - } - - if err := temp.Close(); err != nil { - return errors.Wrap(err, "error closing temp file") - } - - // Handle situation where the configfile is a symlink - cfgFile := configFile.Filename - if f, err := os.Readlink(cfgFile); err == nil { - cfgFile = f - } - - // Try copying the current config file (if any) ownership and permissions - copyFilePermissions(cfgFile, temp.Name()) - return os.Rename(temp.Name(), cfgFile) -} - -// ParseProxyConfig computes proxy configuration by 
retrieving the config for the provided host and -// then checking this against any environment variables provided to the container -func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { - var cfgKey string - - if _, ok := configFile.Proxies[host]; !ok { - cfgKey = "default" - } else { - cfgKey = host - } - - config := configFile.Proxies[cfgKey] - permitted := map[string]*string{ - "HTTP_PROXY": &config.HTTPProxy, - "HTTPS_PROXY": &config.HTTPSProxy, - "NO_PROXY": &config.NoProxy, - "FTP_PROXY": &config.FTPProxy, - } - m := runOpts - if m == nil { - m = make(map[string]*string) - } - for k := range permitted { - if *permitted[k] == "" { - continue - } - if _, ok := m[k]; !ok { - m[k] = permitted[k] - } - if _, ok := m[strings.ToLower(k)]; !ok { - m[strings.ToLower(k)] = permitted[k] - } - } - return m -} - -// encodeAuth creates a base64 encoded string to containing authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64 encoded string and returns username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", errors.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", errors.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} - -// GetCredentialsStore returns a new credentials store from the settings in the -// configuration file -func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { - if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { - return newNativeStore(configFile, helper) - } - return credentials.NewFileStore(configFile) -} - -// var for unit testing. -var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { - return credentials.NewNativeStore(configFile, helperSuffix) -} - -// GetAuthConfig for a repository from the credential store -func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { - return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) -} - -// getConfiguredCredentialStore returns the credential helper configured for the -// given registry, the default credsStore, or the empty string if neither are -// configured. -func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { - if c.CredentialHelpers != nil && registryHostname != "" { - if helper, exists := c.CredentialHelpers[registryHostname]; exists { - return helper - } - } - return c.CredentialsStore -} - -// GetAllCredentials returns all of the credentials stored in all of the -// configured credential stores. 
-func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { - auths := make(map[string]types.AuthConfig) - addAll := func(from map[string]types.AuthConfig) { - for reg, ac := range from { - auths[reg] = ac - } - } - - defaultStore := configFile.GetCredentialsStore("") - newAuths, err := defaultStore.GetAll() - if err != nil { - return nil, err - } - addAll(newAuths) - - // Auth configs from a registry-specific helper should override those from the default store. - for registryHostname := range configFile.CredentialHelpers { - newAuth, err := configFile.GetAuthConfig(registryHostname) - if err != nil { - return nil, err - } - auths[registryHostname] = newAuth - } - return auths, nil -} - -// GetFilename returns the file name that this config file is based on. -func (configFile *ConfigFile) GetFilename() string { - return configFile.Filename -} - -// PluginConfig retrieves the requested option for the given plugin. -func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { - if configFile.Plugins == nil { - return "", false - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - return "", false - } - value, ok := pluginConfig[option] - return value, ok -} - -// SetPluginConfig sets the option to the given value for the given -// plugin. Passing a value of "" will remove the option. If removing -// the final config item for a given plugin then also cleans up the -// overall plugin entry. -func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { - if configFile.Plugins == nil { - configFile.Plugins = make(map[string]map[string]string) - } - pluginConfig, ok := configFile.Plugins[pluginname] - if !ok { - pluginConfig = make(map[string]string) - configFile.Plugins[pluginname] = pluginConfig - } - if value != "" { - pluginConfig[option] = value - } else { - delete(pluginConfig, option) - } - if len(pluginConfig) == 0 { - delete(configFile.Plugins, pluginname) - } -} - -func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { - if kubeConfig == nil { - return nil - } - switch kubeConfig.AllNamespaces { - case "": - case "enabled": - case "disabled": - default: - return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go deleted file mode 100644 index 3ca65c6140d..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package configfile - -import ( - "os" - "syscall" -) - -// copyFilePermissions copies file ownership and permissions from "src" to "dst", -// ignoring any error during the process. 
-func copyFilePermissions(src, dst string) { - var ( - mode os.FileMode = 0600 - uid, gid int - ) - - fi, err := os.Stat(src) - if err != nil { - return - } - if fi.Mode().IsRegular() { - mode = fi.Mode() - } - if err := os.Chmod(dst, mode); err != nil { - return - } - - uid = int(fi.Sys().(*syscall.Stat_t).Uid) - gid = int(fi.Sys().(*syscall.Stat_t).Gid) - - if uid > 0 && gid > 0 { - _ = os.Chown(dst, uid, gid) - } -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go deleted file mode 100644 index 42fffc39ad2..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package configfile - -func copyFilePermissions(src, dst string) { - // TODO implement for Windows -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/credentials.go deleted file mode 100644 index 28d58ec48d7..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store.go deleted file mode 100644 index 402235bff02..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store.go +++ /dev/null @@ -1,21 +0,0 @@ -package credentials - -import ( - exec "golang.org/x/sys/execabs" -) - -// DetectDefaultStore return the default credentials store for the platform if -// the store executable is available. 
-func DetectDefaultStore(store string) string { - platformDefault := defaultCredentialsStore() - - // user defined or no default for platform - if store != "" || platformDefault == "" { - return store - } - - if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { - return platformDefault - } - return "" -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go deleted file mode 100644 index 5d42dec6224..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "osxkeychain" -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go deleted file mode 100644 index a9012c6d4a8..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package credentials - -import ( - "os/exec" -) - -func defaultCredentialsStore() string { - if _, err := exec.LookPath("pass"); err == nil { - return "pass" - } - - return "secretservice" -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go deleted file mode 100644 index 3028168ac24..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -func defaultCredentialsStore() string { - return "" -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go deleted file mode 100644 index bb799ca61b7..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package credentials - -func defaultCredentialsStore() string { - return "wincred" -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/file_store.go deleted file mode 100644 index e509820b73f..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/file_store.go +++ /dev/null @@ -1,81 +0,0 @@ -package credentials - -import ( - "strings" - - "github.com/docker/cli/cli/config/types" -) - -type store interface { - Save() error - GetAuthConfigs() map[string]types.AuthConfig - GetFilename() string -} - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. -type fileStore struct { - file store -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file store) Store { - return &fileStore{file: file} -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.GetAuthConfigs(), serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. 
-func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.GetAuthConfigs()[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for r, ac := range c.file.GetAuthConfigs() { - if serverAddress == ConvertToHostname(r) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.GetAuthConfigs(), nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig - return c.file.Save() -} - -func (c *fileStore) GetFilename() string { - return c.file.GetFilename() -} - -func (c *fileStore) IsFileStore() bool { - return true -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/native_store.go deleted file mode 100644 index afe542cc3ce..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/credentials/native_store.go +++ /dev/null @@ -1,143 +0,0 @@ -package credentials - -import ( - "github.com/docker/cli/cli/config/types" - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using native keychain to keep credentials secure. -// It piggybacks into a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file store, helperSuffix string) Store { - name := remoteCredentialsPrefix + helperSuffix - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fallback to plain text store to remove email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load user email if it exist or an empty auth config. - auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. 
-func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, err := c.listCredentialsInStore() - if err != nil { - return nil, err - } - - // Emails are only stored in the file store. - // This call can be safely eliminated when emails are removed. - fileConfigs, _ := c.fileStore.GetAll() - - authConfigs := make(map[string]types.AuthConfig) - for registry := range auths { - creds, err := c.getCredentialsFromStore(registry) - if err != nil { - return nil, err - } - ac := fileConfigs[registry] // might contain Email - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - authConfigs[registry] = ac - } - - return authConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fallback to old credential in plain text to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} - -// listCredentialsInStore returns a listing of stored credentials as a map of -// URL -> username. -func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { - return client.List(c.programFunc) -} diff --git a/ui/wasm/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/ui/wasm/vendor/github.com/docker/cli/cli/config/types/authconfig.go deleted file mode 100644 index 056af6b8425..00000000000 --- a/ui/wasm/vendor/github.com/docker/cli/cli/config/types/authconfig.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/.gitignore b/ui/wasm/vendor/github.com/docker/distribution/.gitignore deleted file mode 100644 index 4cf7888e927..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. -*.sublime-project -*.sublime-workspace -.idea/* diff --git a/ui/wasm/vendor/github.com/docker/distribution/.gometalinter.json b/ui/wasm/vendor/github.com/docker/distribution/.gometalinter.json deleted file mode 100644 index 9df5b14bcb2..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/.gometalinter.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "Vendor": true, - "Deadline": "2m", - "Sort": ["linter", "severity", "path", "line"], - "EnableGC": true, - "Enable": [ - "structcheck", - "staticcheck", - "unconvert", - - "gofmt", - "goimports", - "golint", - "vet" - ] -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/.mailmap b/ui/wasm/vendor/github.com/docker/distribution/.mailmap deleted file mode 100644 index 0f48321d497..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,32 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita -Yu Wang yuwaMSFT2 -Yu Wang Yu Wang (UC) -Olivier Gambier dmp -Olivier Gambier Olivier -Olivier Gambier Olivier -Elsan Li 李楠 elsanli(李楠) -Rui Cao ruicao -Gwendolynne Barr gbarr01 -Haibing Zhou 周海兵 zhouhaibing089 -Feng Honglin tifayuki -Helen Xie Helen-xie -Mike Brown Mike Brown -Manish Tomar Manish Tomar -Sakeven Jiang sakeven diff --git a/ui/wasm/vendor/github.com/docker/distribution/.travis.yml b/ui/wasm/vendor/github.com/docker/distribution/.travis.yml deleted file mode 100644 index 44ced60451d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/.travis.yml +++ /dev/null @@ -1,51 +0,0 @@ -dist: trusty -sudo: required -# setup travis so that we can run containers for integration tests -services: - - docker - -language: go - -go: - - "1.11.x" - -go_import_path: github.com/docker/distribution - -addons: - apt: - packages: - - python-minimal - - -env: - - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 - -before_install: - - uname -r - - sudo apt-get -q update - -install: - - go get -u github.com/vbatts/git-validation - # TODO: Add enforcement of license - # - go get -u github.com/kunalkushwaha/ltag - - cd $TRAVIS_BUILD_DIR - -script: - - export GOOS=$TRAVIS_GOOS - - export CGO_ENABLED=$TRAVIS_CGO_ENABLED - - DCO_VERBOSITY=-q script/validate/dco - - GOOS=linux 
script/setup/install-dev-tools - - script/validate/vendor - - go build -i . - - make check - - make build - - make binaries - # Currently takes too long - #- if [ "$GOOS" = "linux" ]; then make test-race ; fi - - if [ "$GOOS" = "linux" ]; then make coverage ; fi - -after_success: - - bash <(curl -s https://codecov.io/bash) -F linux - -before_deploy: - # Run tests with storage driver configurations diff --git a/ui/wasm/vendor/github.com/docker/distribution/BUILDING.md b/ui/wasm/vendor/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index 2981d016b0d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,117 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. 
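Beyond reading the log messages, the distribution HTTP API answers on /v2/ (HTTP 200, or 401 when authentication is required), which makes for a quick liveness check. A minimal sketch, assuming the default :5000 listener from the incantation above — illustrative only, not part of the diff:

    package main

    import (
    	"fmt"
    	"net/http"
    )

    func main() {
    	// GET /v2/ is the base endpoint of the registry API; any
    	// well-formed response means the registry is up.
    	resp, err := http.Get("http://localhost:5000/v2/")
    	if err != nil {
    		fmt.Println("registry not reachable:", err)
    		return
    	}
    	defer resp.Body.Close()
    	fmt.Println("registry answered with status:", resp.Status)
    }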
- -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/golang/lint/golint - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendor -directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry --version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. diff --git a/ui/wasm/vendor/github.com/docker/distribution/CONTRIBUTING.md b/ui/wasm/vendor/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 4c067d9e7ec..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,148 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -### Reporting security issues - -The Docker maintainers take security seriously. If you discover a security -issue, please bring it to their attention right away! - -Please **DO NOT** file a public issue, instead send your report privately to -[security@docker.com](mailto:security@docker.com). - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. 
- - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry --version` - 3. copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. 
- -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2. All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. 
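As a concrete illustration of the coding-style rules just listed, here is a short, hypothetical declaration that satisfies them — gofmt-clean, every exported identifier documented, short local names, no underscores in the package name (illustrative only, not part of the diff):

    // Package manifest is hypothetical; it exists only to illustrate the
    // style rules quoted above.
    package manifest

    import "fmt"

    // MediaType identifies the serialized form of a manifest. Exported
    // declarations carry doc comments, per rule 5.
    type MediaType string

    // Validate returns an error when m is empty. The receiver and locals
    // stay short because their context is small, per rule 6.
    func (m MediaType) Validate() error {
    	if m == "" {
    		return fmt.Errorf("media type must not be empty")
    	}
    	return nil
    }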
diff --git a/ui/wasm/vendor/github.com/docker/distribution/Dockerfile b/ui/wasm/vendor/github.com/docker/distribution/Dockerfile deleted file mode 100644 index 9537817ca3a..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM golang:1.11-alpine AS build - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV BUILDTAGS include_oss include_gcs - -ARG GOOS=linux -ARG GOARCH=amd64 -ARG GOARM=6 - -RUN set -ex \ - && apk add --no-cache make git file - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" - -FROM alpine -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml -COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/ui/wasm/vendor/github.com/docker/distribution/LICENSE b/ui/wasm/vendor/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d2081865..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/ui/wasm/vendor/github.com/docker/distribution/MAINTAINERS b/ui/wasm/vendor/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index 3183620c57b..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,243 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# - -[Rules] - - [Rules.maintainers] - - title = "What is a maintainer?" - - text = """ -There are different types of maintainers, with different responsibilities, but -all maintainers have 3 things in common: - -1) They share responsibility in the project's success. -2) They have made a long-term, recurring time investment to improve the project. 
-3) They spend that time doing whatever needs to be done, not necessarily what -is the most interesting or fun. - -Maintainers are often under-appreciated, because their work is harder to appreciate. -It's easy to appreciate a really cool and technically advanced feature. It's harder -to appreciate the absence of bugs, the slow but steady improvement in stability, -or the reliability of a release process. But those things distinguish a good -project from a great one. -""" - - [Rules.reviewer] - - title = "What is a reviewer?" - - text = """ -A reviewer is a core role within the project. -They share in reviewing issues and pull requests and their LGTM count towards the -required LGTM count to merge a code change into the project. - -Reviewers are part of the organization but do not have write access. -Becoming a reviewer is a core aspect in the journey to becoming a maintainer. -""" - - [Rules.adding-maintainers] - - title = "How are maintainers added?" - - text = """ -Maintainers are first and foremost contributors that have shown they are -committed to the long term success of a project. Contributors wanting to become -maintainers are expected to be deeply involved in contributing code, pull -request review, and triage of issues in the project for more than three months. - -Just contributing does not make you a maintainer, it is about building trust -with the current maintainers of the project and being a person that they can -depend on and trust to make decisions in the best interest of the project. - -Periodically, the existing maintainers curate a list of contributors that have -shown regular activity on the project over the prior months. From this list, -maintainer candidates are selected and proposed on the maintainers mailing list. - -After a candidate has been announced on the maintainers mailing list, the -existing maintainers are given five business days to discuss the candidate, -raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing -list. Only maintainers of the repository that the candidate is proposed for are -allowed to vote. - -If a candidate is approved, a maintainer will contact the candidate to invite -the candidate to open a pull request that adds the contributor to the -MAINTAINERS file. The candidate becomes a maintainer once the pull request is -merged. -""" - - [Rules.stepping-down-policy] - - title = "Stepping down policy" - - text = """ -Life priorities, interests, and passions can change. If you're a maintainer but -feel you must remove yourself from the list, inform other maintainers that you -intend to step down, and if possible, help find someone to pick up your work. -At the very least, ensure your work can be continued where you left off. - -After you've informed other maintainers, create a pull request to remove -yourself from the MAINTAINERS file. -""" - - [Rules.inactive-maintainers] - - title = "Removal of inactive maintainers" - - text = """ -Similar to the procedure for adding new maintainers, existing maintainers can -be removed from the list if they do not show significant activity on the -project. Periodically, the maintainers review the list of maintainers and their -activity over the last three months. - -If a maintainer has shown insufficient activity over this period, a neutral -person will contact the maintainer to ask if they want to continue being -a maintainer. 
If the maintainer decides to step down as a maintainer, they -open a pull request to be removed from the MAINTAINERS file. - -If the maintainer wants to remain a maintainer, but is unable to perform the -required duties they can be removed with a vote of at least 66% of -the current maintainers. An e-mail is sent to the -mailing list, inviting maintainers of the project to vote. The voting period is -five business days. Issues related to a maintainer's performance should be -discussed with them among the other maintainers so that they are not surprised -by a pull request removing them. -""" - - [Rules.decisions] - - title = "How are decisions made?" - - text = """ -Short answer: EVERYTHING IS A PULL REQUEST. - -distribution is an open-source project with an open design philosophy. This means -that the repository is the source of truth for EVERY aspect of the project, -including its philosophy, design, road map, and APIs. *If it's part of the -project, it's in the repo. If it's in the repo, it's part of the project.* - -As a result, all decisions can be expressed as changes to the repository. An -implementation change is a change to the source code. An API change is a change -to the API specification. A philosophy change is a change to the philosophy -manifesto, and so on. - -All decisions affecting distribution, big and small, follow the same 3 steps: - -* Step 1: Open a pull request. Anyone can do this. - -* Step 2: Discuss the pull request. Anyone can do this. - -* Step 3: Merge or refuse the pull request. Who does this depends on the nature -of the pull request and which areas of the project it affects. -""" - - [Rules.DCO] - - title = "Helping contributors with the DCO" - - text = """ -The [DCO or `Sign your work`]( -https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work) -requirement is not intended as a roadblock or speed bump. - -Some distribution contributors are not as familiar with `git`, or have used a web -based editor, and thus asking them to `git commit --amend -s` is not the best -way forward. - -In this case, maintainers can update the commits based on clause (c) of the DCO. -The most trivial way for a contributor to allow the maintainer to do this, is to -add a DCO signature in a pull requests's comment, or a maintainer can simply -note that the change is sufficiently trivial that it does not substantially -change the existing contribution - i.e., a spelling change. - -When you add someone's DCO, please also add your own to keep a log. -""" - - [Rules."no direct push"] - - title = "I'm a maintainer. Should I make pull requests too?" - - text = """ -Yes. Nobody should ever push to master directly. All changes should be -made through a pull request. -""" - - [Rules.tsc] - - title = "Conflict Resolution and technical disputes" - - text = """ -distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters." - """ - - [Rules.meta] - - title = "How is this process changed?" - - text = "Just like everything else: by making a pull request :)" - -# Current project organization -[Org] - - [Org.Maintainers] - people = [ - "dmcgowan", - "dmp42", - "stevvooe", - ] - [Org.Reviewers] - people = [ - "manishtomar", - "caervs", - "davidswu", - "RobbKistler" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.caervs] - Name = "Ryan Abrams" - Email = "rdabrams@gmail.com" - GitHub = "caervs" - - [people.davidswu] - Name = "David Wu" - Email = "dwu7401@gmail.com" - GitHub = "davidswu" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.manishtomar] - Name = "Manish Tomar" - Email = "manish.tomar@docker.com" - GitHub = "manishtomar" - - [people.RobbKistler] - Name = "Robb Kistler" - Email = "robb.kistler@docker.com" - GitHub = "RobbKistler" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/ui/wasm/vendor/github.com/docker/distribution/Makefile b/ui/wasm/vendor/github.com/docker/distribution/Makefile deleted file mode 100644 index 4635c6eca87..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,102 +0,0 @@ -# Root directory of the project (absolute path). -ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) -REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) - - -PKG=github.com/docker/distribution - -# Project packages. -PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) -INTEGRATION_PACKAGE=${PKG} -COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) - - -# Project binaries. -COMMANDS=registry digest registry-api-descriptor-template - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -WHALE = "+" - -# Go files -# -TESTFLAGS_RACE= -GOFILES=$(shell find . -type f -name '*.go') -GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) -GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' - -BINARIES=$(addprefix bin/,$(COMMANDS)) - -# Flags passed to `go test` -TESTFLAGS ?= -v $(TESTFLAGS_RACE) -TESTFLAGS_PARALLEL ?= 8 - -.PHONY: all build binaries check clean test test-race test-full integration coverage -.DEFAULT: all - -all: binaries - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - @echo "$(WHALE) $@" - ./version/version.sh > $@ - -check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") - @echo "$(WHALE) $@" - gometalinter --config .gometalinter.json ./... 
- -test: ## run tests, except integration test with test.short - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-race: ## run tests, except integration test with test.short and race - @echo "$(WHALE) $@" - @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -test-full: ## run tests, except integration tests - @echo "$(WHALE) $@" - @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) - -integration: ## run integration tests - @echo "$(WHALE) $@" - @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} - -coverage: ## generate coverprofiles from the unit tests - @echo "$(WHALE) $@" - @rm -f coverage.txt - @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null - @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ - go test ${GO_TAGS} ${TESTFLAGS} \ - -cover \ - -coverprofile=profile.out \ - -covermode=atomic $$pkg || exit; \ - if [ -f profile.out ]; then \ - cat profile.out >> coverage.txt; \ - rm profile.out; \ - fi; \ - done ) - -FORCE: - -# Build a binary from a cmd. -bin/%: cmd/% FORCE - @echo "$(WHALE) $@${BINARY_SUFFIX}" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< - -binaries: $(BINARIES) ## build binaries - @echo "$(WHALE) $@" - -build: - @echo "$(WHALE) $@" - @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) - -clean: ## clean up binaries - @echo "$(WHALE) $@" - @rm -f $(BINARIES) diff --git a/ui/wasm/vendor/github.com/docker/distribution/README.md b/ui/wasm/vendor/github.com/docker/distribution/README.md deleted file mode 100644 index 998878850cd..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. - - - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | -|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | -| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | -| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | -| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. | - -### How does this integrate with Docker engine? 
-
-This project should provide an implementation of a V2 API for use in the [Docker
-core project](https://github.com/docker/docker). The API should be embeddable
-and simplify the process of securely pulling and pushing content from `docker`
-daemons.
-
-### What are the long-term goals of the Distribution project?
-
-The _Distribution_ project has the further long-term goal of providing a
-secure tool chain for distributing content. The specifications, APIs and tools
-should be as useful with Docker as they are without.
-
-Our goal is to design a professional grade and extensible content distribution
-system that allows users to:
-
-* Enjoy an efficient, secure and reliable way to store, manage, package and
-  exchange content
-* Hack/roll their own on top of healthy open-source components
-* Implement their own home-made solutions through good specs and solid
-  extension mechanisms.
-
-## More about Registry 2.0
-
-The new registry implementation provides the following benefits:
-
-- faster push and pull
-- new, more efficient implementation
-- simplified deployment
-- pluggable storage backend
-- webhook notifications
-
-For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
-
-### Who needs to deploy a registry?
-
-By default, Docker users pull images from Docker's public registry instance.
-[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
-ability. Users can also push images to a repository on Docker's public registry,
-if they have a [Docker Hub](https://hub.docker.com/) account.
-
-For some users and even companies, this default behavior is sufficient. For
-others, it is not.
-
-For example, users with their own software products may want to maintain a
-registry for private, company images. Also, you may wish to deploy your own
-image repository for images used in testing or continuous integration. For these
-use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
-may be the better choice.
-
-### Migration to Registry 2.0
-
-For those who have previously deployed their own registry based on the Registry
-1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
-data migration is required. A tool to assist with migration efforts has been
-created. For more information see [docker/migrator](https://github.com/docker/migrator).
-
-## Contribute
-
-Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
-issues, fixes, and patches to this project. If you are contributing code, see
-the instructions for [building a development environment](BUILDING.md).
-
-## Support
-
-If any issues are encountered while using the _Distribution_ project, several
-avenues are available for support:
-
-- IRC: #docker-distribution on FreeNode
-- Issue Tracker: github.com/docker/distribution/issues
-- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
-- Mailing List: docker@dockerproject.org
-
-
-## License
-
-This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/ui/wasm/vendor/github.com/docker/distribution/ROADMAP.md b/ui/wasm/vendor/github.com/docker/distribution/ROADMAP.md
deleted file mode 100644
index 701127afec6..00000000000
--- a/ui/wasm/vendor/github.com/docker/distribution/ROADMAP.md
+++ /dev/null
@@ -1,267 +0,0 @@
-# Roadmap
-
-The Distribution Project consists of several components, some of which are
-still being defined. This document defines the high-level goals of the
-project, identifies the current components, and defines the release-relationship
-to the Docker Platform.
-
-* [Distribution Goals](#distribution-goals)
-* [Distribution Components](#distribution-components)
-* [Project Planning](#project-planning): release-relationship to the Docker Platform.
-
-This road map is a living document, providing an overview of the goals and
-considerations made in respect of the future of the project.
-
-## Distribution Goals
-
-- Replace the existing [docker registry](https://github.com/docker/docker-registry)
-  implementation as the primary implementation.
-- Replace the existing push and pull code in the docker engine with the
-  distribution package.
-- Define a strong data model for distributing docker images
-- Provide a flexible distribution tool kit for use in the docker platform
-- Unlock new distribution models
-
-## Distribution Components
-
-Components of the Distribution Project are managed via GitHub [milestones](https://github.com/docker/distribution/milestones). Upcoming
-features and bugfixes for a component will be added to the relevant milestone. If a feature or
-bugfix is not part of a milestone, it is currently unscheduled for
-implementation.
-
-* [Registry](#registry)
-* [Distribution Package](#distribution-package)
-
-***
-
-### Registry
-
-The new Docker registry is the main portion of the distribution repository.
-Registry 2.0 is the first release of the next-generation registry. This was
-primarily focused on implementing the [new registry
-API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
-with a focus on security and performance.
-
-Following from the Distribution project goals above, we have a set of goals
-for registry v2 that we would like to follow in the design. New features
-should be compared against these goals.
-
-#### Data Storage and Distribution First
-
-The registry's first goal is to provide a reliable, consistent storage
-location for Docker images. The registry should only provide the minimal
-amount of indexing required to fetch image data and no more.
-
-This means we should be selective in new features and API additions, including
-those that may require expensive, ever-growing indexes. Requests should be
-servable in "constant time".
-
-#### Content Addressability
-
-All data objects used in the registry API should be content addressable.
-Content identifiers should be secure and verifiable. This provides a secure,
-reliable base from which to build more advanced content distribution systems.
-
-#### Content Agnostic
-
-In the past, changes to the image format would require large changes in Docker
-and the Registry. By decoupling the distribution and image format, we can
-allow the formats to progress without having to coordinate between the two.
-This means that we should be focused on decoupling Docker from the registry
-just as much as decoupling the registry from Docker.
-Such an approach will allow us to unlock new distribution models that haven't
-been possible before.
-
-We can take this further by saying that the new registry should be content
-agnostic. The registry provides a model of names, tags, manifests and content
-addresses, and that model can be used to work with content.
-
-#### Simplicity
-
-The new registry should be closer to a microservice component than its
-predecessor. This means it should have a narrower API and a low number of
-service dependencies. It should be easy to deploy.
-
-This means that other solutions should be explored before changing the API or
-adding extra dependencies. If functionality is required, can it be added as an
-extension or companion service?
-
-#### Extensibility
-
-The registry should provide extension points to add functionality, keeping
-the core scope narrow while still making it possible to add features.
-
-Features like search, indexing, synchronization and registry explorers fall
-into this category. No such feature should be added unless we've found it
-impossible to do through an extension.
-
-#### Active Feature Discussions
-
-The following are feature discussions that are currently active.
-
-If you don't see your favorite, unimplemented feature, feel free to contact us
-via IRC or the mailing list and we can talk about adding it. The goal here is
-to make sure that new features go through a rigid design process before
-landing in the registry.
-
-##### Proxying to other Registries
-
-A _pull-through caching_ mode exists for the registry, but is restricted from
-within the docker client to only mirror the official Docker Hub. This functionality
-can be expanded when image provenance has been specified and implemented in the
-distribution project.
-
-##### Metadata storage
-
-Metadata for the registry is currently stored with the manifest and layer data on
-the storage backend. While this is a big win for simplicity and reliably maintaining
-state, it comes with the cost of consistency and high latency. The mutable registry
-metadata operations should be abstracted behind an API which will allow ACID-compliant
-storage systems to handle metadata.
-
-##### Peer to Peer transfer
-
-Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
-
-##### Indexing, Search and Discovery
-
-The original registry provided some implementation of search for use with
-private registries. Support has been elided from V2 since we'd like to
-decouple search functionality from the registry. This makes the registry
-simpler to deploy, especially in use cases where search is not needed, and
-lets us decouple the image format from the registry.
-
-There are explorations into using the catalog API and notification system to
-build external indexes. The current line of thought is that we will define a
-common search API to index and query docker images. Such a system could be run
-as a companion to a registry or set of registries to power discovery.
-
-The main issue with search and discovery is that there are so many ways to
-accomplish it. There are two aspects to this project. The first is deciding on
-how it will be done, including an API definition that can work with changing
-data formats. The second is the process of integrating with `docker search`.
-We expect that someone will attempt to address the problem with the existing
-tools, and either propose it as a standard search API or use the experience to
-inform a standardization process.
-Once this has been explored, we integrate with the docker client.
-
-Please see the following for more detail:
-
-- https://github.com/docker/distribution/issues/206
-
-##### Deletes
-
-> __NOTE:__ Deletes are a much asked for feature. Before requesting this
-feature or participating in discussion, we ask that you read this section in
-full and understand the problems behind deletes.
-
-While, at first glance, implementing deletes seems simple, there are a number
-of mitigating factors that make many solutions not ideal or even pathological
-in the context of a registry. The following paragraphs discuss the background
-and approaches that could be applied to arrive at a solution.
-
-The goal of deletes in any system is to remove unused or unneeded data. Only
-data requested for deletion should be removed and no other data. Removing
-unintended data is worse than _not_ removing data that was requested for
-removal but ideally, both are supported. Generally, according to this rule, we
-err on holding data longer than needed, ensuring that it is only removed when
-we can be certain that it can be removed. With the current behavior, we opt to
-hold onto the data forever, ensuring that data cannot be incorrectly removed.
-
-To understand the problems with implementing deletes, one must understand the
-data model. All registry data is stored in a filesystem layout, implemented on
-a "storage driver", effectively a _virtual file system_ (VFS). The storage
-system must assume that this VFS layer will be eventually consistent and has
-poor read-after-write consistency, since this is the lowest common denominator
-among the storage drivers. This is mitigated by writing values in
-reverse-dependent order, but makes wider transactional operations unsafe.
-
-Layered on the VFS model is a content-addressable _directed, acyclic graph_
-(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
-Since the same data can be referenced by multiple manifests, we only store
-data once, even if it is in different repositories. Thus, we have a set of
-blobs, referenced by tags and manifests. If we want to delete a blob we need
-to be certain that it is no longer referenced by another manifest or tag. When
-we delete a manifest, we can also try to delete the referenced blobs. Deciding
-whether or not a blob has an active reference is the crux of the problem.
-
-Conceptually, deleting a manifest and its resources is quite simple. Just find
-all the manifests, enumerate the referenced blobs and delete the blobs not in
-that set. An astute observer will recognize this as a garbage collection
-problem. As with garbage collection in programming languages, this is very
-simple when one always has a consistent view. When one adds parallelism and an
-inconsistent view of data, it becomes very challenging.
-
-A simple example can demonstrate this. Let's say we are deleting a manifest
-_A_ in one process. We scan the manifest and decide that all the blobs are
-ready for deletion. Concurrently, we have another process accepting a new
-manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
-is accepted and all the blobs are considered present, so the operation
-proceeds. The original process then deletes the referenced blobs, assuming
-they were unreferenced. The manifest _B_, which we thought had all of its data
-present, can no longer be served by the registry, since the dependent data has
-been deleted.
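To make the garbage-collection race concrete, here is a deliberately naive mark-and-sweep sketch (illustrative Go only; the types and names are hypothetical, and this is not the registry's actual implementation). The window between the mark phase and the sweep phase is exactly where a concurrently accepted manifest _B_ can lose its blobs:

```go
package main

import "fmt"

// manifest is a hypothetical stand-in: it references blobs by digest.
type manifest struct{ blobs []string }

// markAndSweep returns the digests that appear safe to delete, assuming no
// concurrent writes. If a new manifest is accepted between the mark phase and
// the sweep phase, its blobs may be reported as unreferenced and deleted,
// which is the race described above.
func markAndSweep(manifests map[string]manifest, allBlobs []string) []string {
	// Mark: collect every digest referenced by some manifest.
	referenced := make(map[string]bool)
	for _, m := range manifests {
		for _, d := range m.blobs {
			referenced[d] = true
		}
	}
	// Sweep: everything not marked is a deletion candidate.
	var unreferenced []string
	for _, d := range allBlobs {
		if !referenced[d] {
			unreferenced = append(unreferenced, d)
		}
	}
	return unreferenced
}

func main() {
	manifests := map[string]manifest{"A": {blobs: []string{"sha256:aaa"}}}
	allBlobs := []string{"sha256:aaa", "sha256:bbb"}
	// Only sha256:bbb is unreferenced here; with a concurrent writer, this
	// answer could already be stale by the time the deletes are issued.
	fmt.Println(markAndSweep(manifests, allBlobs))
}
```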
-
-Deleting data from the registry safely requires some way to coordinate this
-operation. The following approaches are being considered:
-
-- _Reference Counting_ - Maintain a count of references to each blob. This is
-  challenging for a number of reasons: 1. maintaining a consistent consensus
-  of reference counts across a set of Registries and 2. building the initial
-  list of reference counts for an existing registry. These challenges can be
-  met with a consensus protocol like Paxos or Raft in the first case and a
-  necessary but simple scan in the second.
-- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
-  and find all blob references. Delete all unreferenced blobs. This approach
-  is very simple but requires disabling writes for a period of time while the
-  service reads all data. This is slow and expensive but very accurate and
-  effective.
-- _Generational GC_ - Do something similar to above but instead of blocking
-  writes, writes are sent to another storage backend while reads are broadcast
-  to the new and old backends. GC is then performed on the read-only portion.
-  Because writes land in the new backend, the data in the read-only section
-  can be safely deleted. The main drawbacks of this approach are complexity
-  and coordination.
-- _Centralized Oracle_ - Using a centralized, transactional database, we can
-  know exactly which data is referenced at any given time. This avoids the
-  coordination problem by managing this data in a single location. We trade
-  off metadata scalability for simplicity and performance. This is a very good
-  option for most registry deployments. This would create a bottleneck for
-  registry metadata. However, metadata is generally not the main bottleneck
-  when serving images.
-
-Please let us know if other solutions exist that we have yet to enumerate.
-Note that for any approach, implementation is a massive consideration. For
-example, a mark-sweep based solution may seem simple, but the amount of work
-in coordination may offset the extra work it would take to build a
-_Centralized Oracle_. We'll accept proposals for any solution but please
-coordinate with us before dropping code.
-
-At this time, we have traded off simplicity and ease of deployment for disk
-space. Simplicity and ease of deployment tend to reduce developer involvement,
-which is currently the most expensive resource in software engineering. Taking
-on any solution for deletes will greatly affect these factors, trading off
-very cheap disk space for a complex deployment and operational story.
-
-Please see the following issues for more detail:
-
-- https://github.com/docker/distribution/issues/422
-- https://github.com/docker/distribution/issues/461
-- https://github.com/docker/distribution/issues/462
-
-### Distribution Package
-
-At its core, the Distribution Project is a set of Go packages that make up
-Distribution Components. At this time, most of these packages make up the
-Registry implementation.
-
-The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
-
-For feature additions, please see the Registry section. In the future, we may break out a
-separate Roadmap for distribution-specific features that apply to more than
-just the registry.
-
-***
-
-### Project Planning
-
-An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap.
[Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. - diff --git a/ui/wasm/vendor/github.com/docker/distribution/blobs.go b/ui/wasm/vendor/github.com/docker/distribution/blobs.go deleted file mode 100644 index c0e9261be93..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,265 +0,0 @@ -package distribution - -import ( - "context" - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // ErrBlobExists returned when blob already exists - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported when blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown when blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown returned when upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength returned when the blob has an expected length on - // commit, meaning mismatched with the descriptor or an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest returned when digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describe the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // Annotations contains arbitrary metadata relating to the targeted content. - Annotations map[string]string `json:"annotations,omitempty"` - - // Platform describes the platform which the image in the manifest runs on. - // This should only be used when referring to a manifest. - Platform *v1.Platform `json:"platform,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. 
-func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned. - Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotators canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (ie sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. 
-type BlobIngester interface {
-	// Put inserts the content p into the blob service, returning a descriptor
-	// or an error.
-	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
-
-	// Create allocates a new blob writer to add a blob to this service. The
-	// returned handle can be written to and later resumed using an opaque
-	// identifier. With this approach, one can Close and Resume a BlobWriter
-	// multiple times until the BlobWriter is committed or cancelled.
-	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
-
-	// Resume attempts to resume a write to a blob, identified by an id.
-	Resume(ctx context.Context, id string) (BlobWriter, error)
-}
-
-// BlobCreateOption is a general extensible function argument for blob creation
-// methods. A BlobIngester may choose to honor any or none of the given
-// BlobCreateOptions, which can be specific to the implementation of the
-// BlobIngester receiving them.
-// TODO (brianbland): unify this with ManifestServiceOption in the future
-type BlobCreateOption interface {
-	Apply(interface{}) error
-}
-
-// CreateOptions is a collection of blob creation modifiers relevant to general
-// blob storage intended to be configured by the BlobCreateOption.Apply method.
-type CreateOptions struct {
-	Mount struct {
-		ShouldMount bool
-		From        reference.Canonical
-		// Stat allows passing a precalculated descriptor to link and return.
-		// Blob access check will be skipped if set.
-		Stat *Descriptor
-	}
-}
-
-// BlobWriter provides a handle for inserting data into a blob store.
-// Instances should be obtained from BlobWriteService.Writer and
-// BlobWriteService.Resume. If supported by the store, a writer can be
-// recovered with the id.
-type BlobWriter interface {
-	io.WriteCloser
-	io.ReaderFrom
-
-	// Size returns the number of bytes written to this blob.
-	Size() int64
-
-	// ID returns the identifier for this writer. The ID can be used with the
-	// Blob service to later resume the write.
-	ID() string
-
-	// StartedAt returns the time this blob write was started.
-	StartedAt() time.Time
-
-	// Commit completes the blob writer process. The content is verified
-	// against the provided provisional descriptor, which may result in an
-	// error. Depending on the implementation, written data may be validated
-	// against the provisional descriptor fields. If MediaType is not present,
-	// the implementation may reject the commit or assign
-	// "application/octet-stream" to the blob. The returned descriptor may
-	// have a different digest depending on the blob store, referred to as the
-	// canonical descriptor.
-	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
-
-	// Cancel ends the blob write without storing any data and frees any
-	// associated resources. Any data written thus far will be lost. Cancel
-	// implementations should allow multiple calls, even after a commit, in
-	// which case they result in a no-op. This allows use of Cancel in a defer
-	// statement, increasing the assurance that it is correctly called.
-	Cancel(ctx context.Context) error
-}
-
-// BlobService combines the operations to access, read and write blobs. This
-// can be used to describe remote blob services.
-type BlobService interface {
-	BlobStatter
-	BlobProvider
-	BlobIngester
-}
-
-// BlobStore represents the entire suite of blob-related operations. Such an
-// implementation can access, read, write, delete and serve blobs.
-type BlobStore interface {
-	BlobService
-	BlobServer
-	BlobDeleter
-}
diff --git a/ui/wasm/vendor/github.com/docker/distribution/digestset/set.go b/ui/wasm/vendor/github.com/docker/distribution/digestset/set.go
deleted file mode 100644
index 71327dca720..00000000000
--- a/ui/wasm/vendor/github.com/docker/distribution/digestset/set.go
+++ /dev/null
@@ -1,247 +0,0 @@
-package digestset
-
-import (
-	"errors"
-	"sort"
-	"strings"
-	"sync"
-
-	digest "github.com/opencontainers/go-digest"
-)
-
-var (
-	// ErrDigestNotFound is used when a matching digest
-	// could not be found in a set.
-	ErrDigestNotFound = errors.New("digest not found")
-
-	// ErrDigestAmbiguous is used when multiple digests
-	// are found in a set. None of the matching digests
-	// should be considered valid matches.
-	ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by a string
-// representation of the digest as well as a short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
-	mutex   sync.RWMutex
-	entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
-	return &Set{
-		entries: digestEntries{},
-	}
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
-	if len(hex) == len(shortHex) {
-		if hex != shortHex {
-			return false
-		}
-		if len(shortAlg) > 0 && string(alg) != shortAlg {
-			return false
-		}
-	} else if !strings.HasPrefix(hex, shortHex) {
-		return false
-	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
-		return false
-	}
-	return true
-}
-
-// Lookup looks for a digest matching the given string representation.
-// If no digests could be found ErrDigestNotFound will be returned
-// with an empty digest value. If multiple matches are found
-// ErrDigestAmbiguous will be returned with an empty digest value.
-func (dst *Set) Lookup(d string) (digest.Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg digest.Algorithm - hex string - ) - dgst, err := digest.Parse(d) - if err == digest.ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. -func (dst *Set) Remove(d digest.Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []digest.Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]digest.Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. 
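// Illustrative example (editor's sketch, not part of the original file): with
// digests "sha256:abcd1111..." and "sha256:abff2222..." in the set and a
// requested length of 4, ShortCodeTable maps the first digest to "abcd" and
// the second to "abff". When several entries share a prefix of the requested
// length, their codes are extended until each prefix becomes unambiguous.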
-func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[digest.Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg digest.Algorithm - val string - digest digest.Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/doc.go b/ui/wasm/vendor/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708e5..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/ui/wasm/vendor/github.com/docker/distribution/errors.go b/ui/wasm/vendor/github.com/docker/distribution/errors.go deleted file mode 100644 index 8e0b788d6c5..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,119 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 -// manifest but the registry is configured to reject it -var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. 
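For context on the `digestset` API deleted above: `Set` keeps a sorted slice of entries so that `Lookup` can resolve short hex prefixes by binary search, and `ShortCodeTable` computes the minimum unique prefix per digest. A minimal usage sketch (the sample digests are made up; import paths are the upstream ones):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	set := digestset.NewSet()

	// Sample digests derived from arbitrary strings, for the sketch only.
	d1 := digest.FromString("layer-one")
	d2 := digest.FromString("layer-two")
	for _, d := range []digest.Digest{d1, d2} {
		if err := set.Add(d); err != nil {
			panic(err)
		}
	}

	// Lookup resolves a short hex prefix to the full digest; it returns
	// ErrDigestAmbiguous when more than one entry matches the prefix.
	full, err := set.Lookup(d1.Hex()[:12])
	fmt.Println(full, err)

	// Minimum-length unique short codes for everything in the set.
	for dgst, short := range digestset.ShortCodeTable(set, 6) {
		fmt.Printf("%s -> %s\n", short, dgst)
	}
}
```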
-type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. -type ErrManifestUnverified struct{} - -func (ErrManifestUnverified) Error() string { - return "unverified manifest" -} - -// ErrManifestVerification provides a type to collect errors encountered -// during manifest verification. Currently, it accepts errors of all types, -// but it may be narrowed to those involving manifest verification. -type ErrManifestVerification []error - -func (errs ErrManifestVerification) Error() string { - var parts []string - for _, err := range errs { - parts = append(parts, err.Error()) - } - - return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) -} - -// ErrManifestBlobUnknown returned when a referenced blob cannot be found. -type ErrManifestBlobUnknown struct { - Digest digest.Digest -} - -func (err ErrManifestBlobUnknown) Error() string { - return fmt.Sprintf("unknown blob %v on manifest", err.Digest) -} - -// ErrManifestNameInvalid should be used to denote an invalid manifest -// name. Reason may set, indicating the cause of invalidity. -type ErrManifestNameInvalid struct { - Name string - Reason error -} - -func (err ErrManifestNameInvalid) Error() string { - return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/manifests.go b/ui/wasm/vendor/github.com/docker/distribution/manifests.go deleted file mode 100644 index 1816baea1d6..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,125 +0,0 @@ -package distribution - -import ( - "context" - "fmt" - "mime" - - "github.com/opencontainers/go-digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // A reference is anything which can be represented by a - // distribution.Descriptor. These can consist of layers, resources or other - // manifests. - // - // While no particular order is required, implementations should return - // them from highest to lowest priority. For example, one might want to - // return the base layer before the top layer. - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the media type. - Payload() (mediaType string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. -// Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. 
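The typed errors in the `errors.go` deleted above carry structured fields instead of pre-formatted strings, so callers can branch on the error type. A small sketch (`fetchManifest` is a hypothetical stand-in for a registry call, not part of the package):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/distribution"
	digest "github.com/opencontainers/go-digest"
)

// fetchManifest is a hypothetical helper that fails with one of the
// typed errors defined in the deleted errors.go.
func fetchManifest(repo string, rev digest.Digest) error {
	return distribution.ErrManifestUnknownRevision{Name: repo, Revision: rev}
}

func main() {
	err := fetchManifest("library/ubuntu", digest.FromString("missing"))

	// The structured fields survive, so callers can inspect them.
	var unknown distribution.ErrManifestUnknownRevision
	if errors.As(err, &unknown) {
		fmt.Printf("no manifest %s@%s\n", unknown.Name, unknown.Revision)
	}
}
```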
-type ManifestBuilder interface { - // Build creates the manifest from his builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - // - // The destination of the reference is dependent on the manifest type and - // the dependency type. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest. - Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediaType string - if ctHeader != "" { - var err error - mediaType, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediaType] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
This -// should be called from specific -func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { - if _, ok := mappings[mediaType]; ok { - return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) - } - mappings[mediaType] = u - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/metrics/prometheus.go b/ui/wasm/vendor/github.com/docker/distribution/metrics/prometheus.go deleted file mode 100644 index b5a5321448a..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/metrics/prometheus.go +++ /dev/null @@ -1,13 +0,0 @@ -package metrics - -import "github.com/docker/go-metrics" - -const ( - // NamespacePrefix is the namespace of prometheus metrics - NamespacePrefix = "registry" -) - -var ( - // StorageNamespace is the prometheus namespace of blob/cache related operations - StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) -) diff --git a/ui/wasm/vendor/github.com/docker/distribution/reference/helpers.go b/ui/wasm/vendor/github.com/docker/distribution/reference/helpers.go deleted file mode 100644 index 978df7eabbf..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/reference/normalize.go b/ui/wasm/vendor/github.com/docker/distribution/reference/normalize.go deleted file mode 100644 index 2d71fc5e9ff..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/reference/normalize.go +++ /dev/null @@ -1,170 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digestset" - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. 
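In the `manifests.go` deleted above, `UnmarshalManifest` dispatches on the parsed media type (falling back to the `""` default), and `RegisterManifestSchema` is how schema packages hook themselves in. A self-contained toy sketch (`toyManifest` and its media type are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
)

// toyMediaType and toyManifest exist only for this sketch.
const toyMediaType = "application/vnd.example.manifest.v1+json"

type toyManifest struct{ payload []byte }

func (m toyManifest) References() []distribution.Descriptor { return nil }
func (m toyManifest) Payload() (string, []byte, error)      { return toyMediaType, m.payload, nil }

func init() {
	// Real schema packages register from init(); registering the same
	// media type twice returns an error.
	err := distribution.RegisterManifestSchema(toyMediaType,
		func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
			return toyManifest{payload: b}, distribution.Descriptor{MediaType: toyMediaType}, nil
		})
	if err != nil {
		panic(err)
	}
}

func main() {
	// Content-Type parameters are stripped before the mapping lookup.
	m, desc, err := distribution.UnmarshalManifest(toyMediaType+"; charset=utf-8", []byte(`{}`))
	fmt.Println(m != nil, desc.MediaType, err)
}
```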
If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. 
-func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// ParseAnyReferenceWithSet parses a reference string as a possible short -// identifier to be matched in a digest set, a full digest, or familiar name. -func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { - if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { - dgst, err := ds.Lookup(ref) - if err == nil { - return digestReference(dgst), nil - } - } else { - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - } - - return ParseNormalizedNamed(ref) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/reference/reference.go b/ui/wasm/vendor/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index 2f66cca87a3..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. 
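The `normalize.go` deleted above implements the round-trip between familiar Docker UI names and fully qualified references. A short sketch using only the exported API; the expected outputs in the comments follow directly from the code above:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// A familiar UI name expands to its fully qualified form.
	named, err := reference.ParseNormalizedNamed("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// TagNameOnly adds ":latest" to name-only references.
	tagged := reference.TagNameOnly(named)
	fmt.Println(tagged.String()) // docker.io/library/ubuntu:latest

	// FamiliarString shortens back to the UI form.
	fmt.Println(reference.FamiliarString(tagged)) // ubuntu:latest
}
```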
- ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/reference/regexp.go b/ui/wasm/vendor/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 78603493203..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. 
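The strict constructors in the `reference.go` deleted above compose as follows; a minimal sketch (the registry host and repository name are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Parse is strict: no docker.io/library normalization happens here.
	ref, err := reference.Parse("registry.example.com:5000/team/app:v1")
	if err != nil {
		panic(err)
	}
	if nt, ok := ref.(reference.NamedTagged); ok {
		fmt.Println(reference.Domain(nt), reference.Path(nt), nt.Tag())
	}

	// WithDigest upgrades a Named into a Canonical reference.
	named, err := reference.WithName("team/app")
	if err != nil {
		panic(err)
	}
	canonical, err := reference.WithDigest(named, digest.FromString("content"))
	if err != nil {
		panic(err)
	}
	fmt.Println(canonical.String())

	// TrimNamed strips tag and digest back off.
	fmt.Println(reference.TrimNamed(canonical).Name())
}
```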
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. 
-func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry.go b/ui/wasm/vendor/github.com/docker/distribution/registry.go deleted file mode 100644 index 6c32109894d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,118 +0,0 @@ -package distribution - -import ( - "context" - - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. 
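The combinators in the `regexp.go` deleted above are unexported, but the technique is simple: compile small `*regexp.Regexp` values and concatenate their `String()` forms into larger anchored patterns. A standalone re-implementation sketch (this is not the package's own API, just the same idea):

```go
package main

import (
	"fmt"
	"regexp"
)

// expression concatenates the String() forms of smaller expressions.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}
	return regexp.MustCompile(s)
}

// group wraps an expression in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return regexp.MustCompile(`(?:` + expression(res...).String() + `)`)
}

// optional makes the grouped production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return regexp.MustCompile(group(expression(res...)).String() + `?`)
}

// anchored pins the expression to the start and end of the input.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return regexp.MustCompile(`^` + expression(res...).String() + `$`)
}

func main() {
	name := regexp.MustCompile(`[a-z0-9]+(?:(?:[._]|__|[-]*)[a-z0-9]+)*`)
	tag := regexp.MustCompile(`[\w][\w.-]{0,127}`)
	ref := anchored(name, optional(regexp.MustCompile(`:`), tag))

	fmt.Println(ref.MatchString("myapp:v1.0")) // true
	fmt.Println(ref.MatchString("MyApp:v1"))   // false: names are lowercase only
}
```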
- Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter to control - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error -} - -// RepositoryRemover removes given repository -type RepositoryRemover interface { - Remove(ctx context.Context, name reference.Named) error -} - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// WithManifestMediaTypes lists the media types the client wishes -// the server to provide. -func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { - return WithManifestMediaTypesOption{mediaTypes} -} - -// WithManifestMediaTypesOption holds a list of accepted media types -type WithManifestMediaTypesOption struct{ MediaTypes []string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service. - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients. This will allow such - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repositories tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62af..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. 
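Of the interfaces in the `registry.go` deleted above, `Scope` is the one most commonly implemented directly by embedding code. A hypothetical example (`prefixScope` is invented for the sketch):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution"
)

// prefixScope is a hypothetical Scope restricting a namespace to one org.
type prefixScope string

func (p prefixScope) Contains(name string) bool {
	return strings.HasPrefix(name, string(p)+"/")
}

func main() {
	var s distribution.Scope = prefixScope("myorg")
	fmt.Println(s.Contains("myorg/app"))                       // true
	fmt.Println(s.Contains("other/app"))                       // false
	fmt.Println(distribution.GlobalScope.Contains("anything")) // true
}
```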
-type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. 
This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index d77e70473e7..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,40 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. 
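Per the `MarshalJSON` in the `errcode` package deleted above, an `Errors` slice serializes to the registry's `{"errors": [...]}` envelope, expanding bare `ErrorCode` values and filling empty messages from the descriptor. A runnable sketch (the detail payload is arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Bare ErrorCodes and detailed Errors can be mixed in one envelope.
	errs := errcode.Errors{
		errcode.ErrorCodeDenied.WithDetail(map[string]string{"repository": "myorg/app"}),
		errcode.ErrorCodeUnsupported,
	}

	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(errs) // {"errors": [{"code": "DENIED", ...}, ...]}

	fmt.Println(errs.Len(), "errors")
}
```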
-func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. - err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6d7..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
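`ServeJSON`, deleted above, derives the HTTP status from the first error's descriptor and always wraps the payload in an `Errors` envelope. A minimal handler sketch (the route and listen address are arbitrary for illustration):

```go
package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	http.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) {
		// Status comes from the descriptor (401 for UNAUTHORIZED);
		// the body is the standard {"errors": [...]} envelope.
		_ = errcode.ServeJSON(w, errcode.ErrorCodeUnauthorized.WithDetail(nil))
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```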
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index a9616c58ad5..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/opencontainers/go-digest" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
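`Register`, deleted above, allocates the next integer code under a lock and panics on duplicate `Value` or code registration, which is why it is only called from package-level `var`/`init` blocks. An illustrative sketch with a hypothetical group and error code:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// ErrorCodePaused is a hypothetical project-specific code in its own
// "myapp" group; Register panics if the Value is already taken.
var ErrorCodePaused = errcode.Register("myapp", errcode.ErrorDescriptor{
	Value:          "PAUSED",
	Message:        "repository is paused",
	Description:    "Returned when writes are temporarily disabled for a repository.",
	HTTPStatusCode: http.StatusServiceUnavailable,
})

func main() {
	fmt.Println(errcode.GetGroupNames()) // includes "myapp"
	for _, d := range errcode.GetErrorCodeGroup("myapp") {
		fmt.Println(d.Value, d.HTTPStatusCode)
	}
	fmt.Println(ErrorCodePaused.Error()) // "paused"
}
```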
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
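[Editor's note: to make the manifest GET contract above concrete, a client-side sketch. The registry host, repository, and tag are placeholders, and the Accept value is the common schema2 media type, an assumption here, since the descriptor deliberately leaves ContentType open.]

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	url := "https://registry.example.com/v2/library/ubuntu/manifests/latest"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	// Without a media-type hint, many registries fall back to older formats.
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Docker-Content-Digest is the digestHeader documented above; it names
	// the canonical digest of the returned manifest.
	fmt.Println("status:", resp.StatusCode)
	fmt.Println("digest:", resp.Header.Get("Docker-Content-Digest"))
	body, _ := io.ReadAll(resp.Body)
	fmt.Println("bytes:", len(body))
}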
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
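[Editor's note: the "Fetch Blob Part" variant above is plain RFC 7233 ranging. A sketch of a client probing it; host and repository are placeholders, and the digest is the well-known sha256 of empty input used only as a stand-in.]

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	const dgst = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	url := "https://registry.example.com/v2/library/ubuntu/blobs/" + dgst

	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Range", "bytes=0-1023") // ask for the first KiB only

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 206 means the chunk was honored; 200 means the server sent everything.
	fmt.Println("status:", resp.StatusCode)
	fmt.Println("content-range:", resp.Header.Get("Content-Range"))
	n, _ := io.Copy(io.Discard, resp.Body)
	fmt.Println("bytes received:", n)
}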
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
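[Editor's note: read together, the POST, PATCH, and PUT descriptors above define the whole resumable upload handshake. A condensed client sketch with a placeholder registry and repository and only minimal error handling.]

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"net/http"
	"net/url"
)

// uploadBlob pushes a blob using the POST -> PATCH -> PUT sequence the
// descriptors above document.
func uploadBlob(registry, repo string, blob []byte) error {
	// 1. Initiate a resumable upload; a 202 carries the upload Location.
	req, err := http.NewRequest(http.MethodPost, registry+"/v2/"+repo+"/blobs/uploads/", nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusAccepted {
		return fmt.Errorf("initiate: unexpected status %d", resp.StatusCode)
	}
	location := resp.Header.Get("Location")

	// 2. Send the whole blob as one PATCH; the "Stream upload" request
	// above allows this without a Content-Range header.
	req, err = http.NewRequest(http.MethodPatch, location, bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if next := resp.Header.Get("Location"); next != "" {
		location = next // the Location may change after every request
	}

	// 3. Commit with the digest; keep the Location verbatim and only add
	// the digest parameter, as the descriptors instruct.
	u, err := url.Parse(location)
	if err != nil {
		return err
	}
	q := u.Query()
	q.Set("digest", fmt.Sprintf("sha256:%x", sha256.Sum256(blob)))
	u.RawQuery = q.Encode()

	req, err = http.NewRequest(http.MethodPut, u.String(), nil)
	if err != nil {
		return err
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("commit: unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := uploadBlob("https://registry.example.com", "library/demo", []byte("hello")); err != nil {
		panic(err)
	}
}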
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde0119594d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
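[Editor's note: the tags list earlier and the catalog route above page through results with the same n/last parameters and Link header. A client sketch that follows rel="next" until the registry stops returning it; the host is a placeholder.]

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

type catalogPage struct {
	Repositories []string `json:"repositories"`
}

func main() {
	base := "https://registry.example.com" // placeholder registry
	next := base + "/v2/_catalog?n=100"

	for next != "" {
		resp, err := http.Get(next)
		if err != nil {
			panic(err)
		}
		var page catalogPage
		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
			panic(err)
		}
		resp.Body.Close()

		for _, repo := range page.Repositories {
			fmt.Println(repo)
		}

		// The rel="next" Link header documented above signals another page;
		// its URL is typically relative to the registry root.
		next = nextLink(resp.Header.Get("Link"))
		if strings.HasPrefix(next, "/") {
			next = base + next
		}
	}
}

// nextLink pulls the target out of a `<url>; rel="next"` Link header.
func nextLink(header string) string {
	start := strings.Index(header, "<")
	end := strings.Index(header, ">")
	if start < 0 || end < start {
		return ""
	}
	return header[start+1 : end]
}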
-package v2 diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa03..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown returned when image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors. 
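[Editor's note: these registered codes are what handlers ultimately serialize into the errorsBody shape shown earlier. A rough server-side sketch; it assumes the errcode package's ServeJSON helper and the ErrorCode.WithDetail method, neither of which appears in this diff, behave as their names suggest. The digest detail value is a stand-in.]

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func repoHandler(w http.ResponseWriter, r *http.Request) {
	// Pretend the repository lookup failed: attach detail and let ServeJSON
	// pick the 404 status registered for NAME_UNKNOWN above.
	err := v2.ErrorCodeNameUnknown.WithDetail(map[string]string{
		"name": "library/ubuntu",
	})
	_ = errcode.ServeJSON(w, err)
}

func main() {
	http.HandleFunc("/v2/", repoHandler)
	_ = http.ListenAndServe(":5000", nil)
}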
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a64f..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by single proxy. 
The -// function parses only the first element of the list, which is set by the very first proxy. It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in 
a quoted string, ignore the backslash - // transition back to QuotedValue state - case stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 9612ac2e5a5..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,40 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the name under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index 1337bdb1276..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,266 +0,0 @@ -package v2 - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -// -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will be fall -// under "/foo/v2/...". Most application will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString workes identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url. 
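[Editor's note: Router and RouterWithPrefix above register named, handler-less gorilla/mux routes; callers attach handlers by looking routes up by name. A minimal sketch wiring the base route; the version header value is the conventional one and an assumption here.]

package main

import (
	"fmt"
	"net/http"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	router := v2.Router()

	// Look the route up by its registered name and attach a handler.
	router.Get(v2.RouteNameBase).Handler(http.HandlerFunc(
		func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set("Docker-Distribution-API-Version", "registry/2.0")
			fmt.Fprint(w, "{}")
		}))

	_ = http.ListenAndServe(":5000", router)
}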
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) - - if r.TLS != nil { - scheme = "https" - } else if len(r.URL.Scheme) > 0 { - scheme = r.URL.Scheme - } - - // Handle fowarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - default: - return "", fmt.Errorf("reference must have a tag or digest") - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. 
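[Editor's note: from the client side, the builder above turns a registry root plus a parsed reference into concrete endpoint URLs. A short sketch using the constructors shown here; the root URL and image name are placeholders.]

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", false)
	if err != nil {
		panic(err)
	}

	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}

	tagsURL, _ := ub.BuildTagsURL(named)
	fmt.Println(tagsURL) // https://registry.example.com/v2/library/ubuntu/tags/list

	tagged, err := reference.WithTag(named, "22.04")
	if err != nil {
		panic(err)
	}
	manifestURL, _ := ub.BuildManifestURL(tagged)
	fmt.Println(manifestURL) // .../v2/library/ubuntu/manifests/22.04
}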
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clondedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d95768..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. 
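[Editor's note: a sketch of the version-negotiation helpers defined just below in client use: probe /v2/ and read the version header. The header name is the one conventionally used by distribution registries, an assumption here, since the helper itself is header-name agnostic; the host is a placeholder.]

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth"
)

func main() {
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Each header value may carry several space-separated versions;
	// APIVersions flattens them into typed entries.
	for _, v := range auth.APIVersions(resp, "Docker-Distribution-API-Version") {
		fmt.Printf("type=%s version=%s\n", v.Type, v.Version)
	}
}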
-type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented, - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = '/' -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)? -// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe16532..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port. -func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index 6e3f1ccc410..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. 
Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenicate headers and added to the - // URL which was produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manger which only maps endpoints to challenges -// based on the responses which have been added the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleManager() Manager { - return &simpleManager{ - Challenges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challenges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challenges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challenges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. 
- return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/session.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/session.go deleted file mode 100644 index aad8a0e6f5c..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/auth/session.go +++ /dev/null @@ -1,530 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. - ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request from -// params from a "WWW-Authenicate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenicate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from "WWW-Authenticate" method. The parameters - // values depend on the scheme. 
- AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges challenge.Manager - handlers []AuthenticationHandler -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, c := range challenges { - if c.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation. -type clock interface { - Now() time.Time -} - -type tokenHandler struct { - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time - - logger Logger -} - -// Scope is a type which is serializable to a string -// using the allow scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Class string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - repoType := "repository" - // Keep existing format for image class to maintain backwards compatibility - // with authorization servers which do not support the expanded grammar. 
- if rs.Class != "" && rs.Class != "image" { - repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class) - } - return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the user -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// Logger defines the injectable logging interface, used on TokenHandlers. -type Logger interface { - Debugf(format string, args ...interface{}) -} - -func logDebugf(logger Logger, format string, args ...interface{}) { - if logger == nil { - return - } - logger.Debugf(format, args...) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope - Logger Logger -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenicationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - logger: options.Logger, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...) 
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - if hasScope(scopes, scope) { - continue - } - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -func hasScope(scopes []string, scope string) bool { - for _, s := range scopes { - if s == scope { - return true - } - } - return false -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creaters a new authentiation handler which adds -// basic authentication credentials to a request. 
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index 695bf852f16..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -
-func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/errors.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index 52d49d5d295..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,139 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field.
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). 
-func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/repository.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index aa442e65406..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,867 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. - hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return ®istry{ - client: client, - ub: ub, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder -} - -// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. 
The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
- // Issue a GET request: - // - for data from a server that does not handle HEAD - // - to get error details in case of a failure - resp, err = newRequest("GET") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 200 && resp.StatusCode < 400 { - return descriptorFromResponse(resp) - } - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set a the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest is represents the digest which the registry uses -// to refer to the content and can be used to delete the content. 
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -
-func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...)
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index 1d0b382fb51..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,250 
+0,0 @@ -package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == io.SeekStart && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. 
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case io.SeekCurrent: - newOffset += offset - case io.SeekEnd: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case io.SeekStart: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - req.Header.Add("Accept-Encoding", "identity") - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git 
a/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0f7..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
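The `RequestModifier`/`NewTransport` pair above is essentially the whole public surface of the deleted transport package. A minimal usage sketch (endpoint and token are hypothetical placeholders); because `RoundTrip` clones the request before applying modifiers, the caller's original request is never mutated.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

func main() {
	// Headers added here are appended to every request that passes
	// through the wrapped RoundTripper.
	hdr := http.Header{}
	hdr.Set("Authorization", "Bearer example-token") // placeholder token
	hdr.Set("User-Agent", "example-client/0.1")

	rt := transport.NewTransport(http.DefaultTransport,
		transport.NewHeaderRequestModifier(hdr))

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://registry.example.com/v2/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```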
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a3909197c..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index ac4c452117d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,129 +0,0 @@ -package cache - -import ( - "context" - - "github.com/docker/distribution" - prometheus "github.com/docker/distribution/metrics" - "github.com/opencontainers/go-digest" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -var ( - // cacheCount is the number of total cache request received/hits/misses - cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") -) - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. 
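`ValidateDescriptor` above is the admission check shared by all the cache implementations being removed. A quick sketch of what it accepts and rejects, using a digest computed from a literal string:

```go
package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/opencontainers/go-digest"
)

func main() {
	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len("hello")),
		Digest:    digest.FromString("hello"),
	}
	// Passes: the digest parses, the size is non-negative,
	// and the media type is set.
	fmt.Println("valid:", cache.ValidateDescriptor(desc))

	desc.MediaType = ""
	// Rejected: caches refuse descriptors without a media type.
	fmt.Println("empty mediatype:", cache.ValidateDescriptor(desc))
}
```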
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 42d94d9bde6..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
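Putting the two removed pieces together — the cached statter above and the in-memory provider whose constructor follows — a sketch of the miss-then-hit flow. The backend here is a stub (hypothetical); on the first `Stat` the cache misses, the backend answers, and the result is backfilled, so the second `Stat` never reaches the backend.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

// stubBackend stands in for the real storage-backed statter.
type stubBackend struct{ calls int }

func (b *stubBackend) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
	b.calls++
	return distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      5,
		Digest:    dgst,
	}, nil
}

func (b *stubBackend) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error {
	return nil
}

func (b *stubBackend) Clear(ctx context.Context, dgst digest.Digest) error { return nil }

func main() {
	ctx := context.Background()
	dgst := digest.FromString("hello")

	backend := &stubBackend{}
	statter := cache.NewCachedBlobStatter(
		memory.NewInMemoryBlobDescriptorCacheProvider(), backend)

	statter.Stat(ctx, dgst) // miss: hits the backend, then backfills the cache
	statter.Stat(ctx, dgst) // hit: served from the in-memory cache
	fmt.Println("backend calls:", backend.calls) // 1
}
```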
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
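The `SetDescriptor` method above uses a double-checked lazy allocation: the scoped handle caches a map pointer that may still be nil, so before allocating it re-reads the parent's map under the lock, in case another handle for the same repository allocated it first. A distilled sketch of that pattern (the write stays under the lock here; in the original the inner cache carries its own lock, so the write can happen after release):

```go
package main

import (
	"fmt"
	"sync"
)

type parent struct {
	mu    sync.Mutex
	repos map[string]map[string]string
}

type scoped struct {
	p    *parent
	name string
	repo map[string]string // lazily filled
}

func (s *scoped) set(k, v string) {
	s.p.mu.Lock()
	defer s.p.mu.Unlock()
	repo := s.repo
	if repo == nil {
		var ok bool
		repo, ok = s.p.repos[s.name] // another handle may have allocated it
		if !ok {
			repo = make(map[string]string)
			s.p.repos[s.name] = repo
		}
		s.repo = repo
	}
	repo[k] = v
}

func main() {
	p := &parent{repos: make(map[string]map[string]string)}
	a := &scoped{p: p, name: "library/ubuntu"}
	b := &scoped{p: p, name: "library/ubuntu"}
	a.set("tag", "latest")
	b.set("arch", "amd64") // reuses the map that a allocated
	fmt.Println(p.repos["library/ubuntu"])
}
```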
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/tags.go b/ui/wasm/vendor/github.com/docker/distribution/tags.go deleted file mode 100644 index f22df2b850e..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. 
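The unexported map cache above is reachable only through the provider's exported API. A round-trip sketch against the global cache: unknown digests surface `distribution.ErrBlobUnknown`, and a stored descriptor is returned verbatim on the next `Stat`.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/storage/cache/memory"
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()
	dgst := digest.FromString("hello")
	cacheProvider := memory.NewInMemoryBlobDescriptorCacheProvider()

	// Unknown digests surface distribution.ErrBlobUnknown.
	if _, err := cacheProvider.Stat(ctx, dgst); err == distribution.ErrBlobUnknown {
		fmt.Println("not cached yet")
	}

	desc := distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      5,
		Digest:    dgst,
	}
	if err := cacheProvider.SetDescriptor(ctx, dgst, desc); err != nil {
		panic(err)
	}

	got, _ := cacheProvider.Stat(ctx, dgst)
	fmt.Println("cached size:", got.Size)
}
```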
- Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/ui/wasm/vendor/github.com/docker/distribution/vendor.conf b/ui/wasm/vendor/github.com/docker/distribution/vendor.conf deleted file mode 100644 index a249caf269d..00000000000 --- a/ui/wasm/vendor/github.com/docker/distribution/vendor.conf +++ /dev/null @@ -1,51 +0,0 @@ -github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b -github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 -github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 -github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 -github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a -github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 -github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 -github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 -github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 -github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 -github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 -github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab -github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 -github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 -github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c -github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 -github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b -github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 -github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d -github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f -github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 -github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c -github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 -github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef -github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 -github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 -github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c -github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 -github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd -github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd -github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 -github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 -github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 -github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e -github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 -github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 -golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b -golang.org/x/net 4876518f9e71663000c348837735820161a42df7 -golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf -golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb -google.golang.org/api 9bf6e6e569ff057f75d9604a46c52928f17d2b54 -google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 -google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 -google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 -gopkg.in/check.v1 
64131543e7896d5bcc6bd5a76287eb75ea96c673 -gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b -gopkg.in/yaml.v2 v2.2.1 -rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git -github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb -github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/LICENSE b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2af0..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/client.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index d1d0434cb55..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,121 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// isValidCredsMessage checks if 'msg' contains invalid credentials error message. -// It returns whether the logs are free of invalid credentials errors and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. -func isValidCredsMessage(msg string) error { - if credentials.IsCredentialsMissingServerURLMessage(msg) { - return credentials.NewErrCredentialsMissingServerURL() - } - - if credentials.IsCredentialsMissingUsernameMessage(msg) { - return credentials.NewErrCredentialsMissingUsername() - } - - return nil -} - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(creds); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. 
-func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. -func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/command.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 0183c063939..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,57 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - - exec "golang.org/x/sys/execabs" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. -func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) -} - -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} - } -} - -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) - programCmd.Env = os.Environ() - if env != nil { - for k, v := range *env { - programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) - } - } - programCmd.Stderr = os.Stderr - return programCmd -} - -// Shell invokes shell commands to talk with a remote credentials helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. 
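From the consumer side, the deleted client package drives an external helper binary through the `ProgramFunc`/`Shell` machinery below. A minimal sketch: `docker-credential-pass` is one of the stock helper binaries shipped by this project, and the registry URL is a placeholder; any program speaking the helper protocol on stdin/stdout would do.

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Wraps each invocation of the named binary ("store", "get", ...).
	program := client.NewShellProgramFunc("docker-credential-pass")

	creds, err := client.Get(program, "https://registry.example.com")
	if err != nil {
		// e.g. "credentials not found in native keychain"
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("user:", creds.Username)
}
```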
-func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index da8b594e7f8..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,186 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// isValid checks the integrity of Credentials object such that no credentials lack -// a server URL or a username. -// It returns whether the credentials are valid and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername -func (c *Credentials) isValid() (bool, error) { - if len(c.ServerURL) == 0 { - return false, NewErrCredentialsMissingServerURL() - } - - if len(c.Username) == 0 { - return false, NewErrCredentialsMissingUsername() - } - - return true, nil -} - -// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials" -var CredsLabel = "Docker Credentials" - -// SetCredsLabel is a simple setter for CredsLabel -func SetCredsLabel(label string) { - CredsLabel = label -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - case "list": - return List(helper, out) - case "version": - return PrintVersion(out) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. 
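`Serve` above is the entry point a helper binary is built around: it dispatches `os.Args[1]` to the `Helper` implementation. A minimal sketch of a complete helper program using a toy in-memory store (real helpers persist to the OS keychain); the `Helper` interface methods used here are the ones defined later in this same removal.

```go
package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper is a toy in-memory store, for illustration only.
type memoryHelper struct {
	store map[string]*credentials.Credentials
}

func (m *memoryHelper) Add(c *credentials.Credentials) error {
	m.store[c.ServerURL] = c
	return nil
}

func (m *memoryHelper) Delete(serverURL string) error {
	delete(m.store, serverURL)
	return nil
}

func (m *memoryHelper) Get(serverURL string) (string, string, error) {
	if c, ok := m.store[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (m *memoryHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(m.store))
	for url, c := range m.store {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Reads the action from os.Args[1] ("store", "get", "erase",
	// "list", "version") and the payload from stdin.
	credentials.Serve(&memoryHelper{store: map[string]*credentials.Credentials{}})
}
```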
-func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - if ok, err := creds.isValid(); !ok { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - ServerURL: serverURL, - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. -func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - return helper.Delete(serverURL) -} - -//List returns all the serverURLs of keys in -//the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} - -//PrintVersion outputs the current version. -func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index fe6a5aef45c..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -const ( - // ErrCredentialsNotFound standardizes the not found error, so every helper returns - // the same message and docker can handle it properly. - errCredentialsNotFoundMessage = "credentials not found in native keychain" - - // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize - // invalid credentials or credentials management operations - errCredentialsMissingServerURLMessage = "no credentials server URL" - errCredentialsMissingUsernameMessage = "no credentials username" -) - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. 
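The helper-side handlers above define the wire protocol: `get` reads a bare server URL from stdin and writes the credentials as JSON. `HandleCommand` can be exercised in-process, without spawning a binary, which makes the format easy to see. A sketch with a trivial fixed-answer helper (example values only):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/docker/docker-credential-helpers/credentials"
)

// fixedHelper returns the same credentials for every server URL.
type fixedHelper struct{}

func (fixedHelper) Add(*credentials.Credentials) error { return nil }
func (fixedHelper) Delete(string) error                { return nil }
func (fixedHelper) Get(serverURL string) (string, string, error) {
	return "alice", "s3cret", nil // example values only
}
func (fixedHelper) List() (map[string]string, error) {
	return map[string]string{"https://registry.example.com": "alice"}, nil
}

func main() {
	// "get" reads a bare server URL from stdin and writes JSON credentials.
	in := strings.NewReader("https://registry.example.com\n")
	if err := credentials.HandleCommand(fixedHelper{}, "get", in, os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	// -> {"ServerURL":"https://registry.example.com","Username":"alice","Secret":"s3cret"}
}
```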
-func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} - -// errCredentialsMissingServerURL represents an error raised -// when the credentials object has no server URL or when no -// server URL is provided to a credentials operation requiring -// one. -type errCredentialsMissingServerURL struct{} - -func (errCredentialsMissingServerURL) Error() string { - return errCredentialsMissingServerURLMessage -} - -// errCredentialsMissingUsername represents an error raised -// when the credentials object has no username or when no -// username is provided to a credentials operation requiring -// one. -type errCredentialsMissingUsername struct{} - -func (errCredentialsMissingUsername) Error() string { - return errCredentialsMissingUsernameMessage -} - -// NewErrCredentialsMissingServerURL creates a new error for -// errCredentialsMissingServerURL. -func NewErrCredentialsMissingServerURL() error { - return errCredentialsMissingServerURL{} -} - -// NewErrCredentialsMissingUsername creates a new error for -// errCredentialsMissingUsername. -func NewErrCredentialsMissingUsername() error { - return errCredentialsMissingUsername{} -} - -// IsCredentialsMissingServerURL returns true if the error -// was an errCredentialsMissingServerURL. -func IsCredentialsMissingServerURL(err error) bool { - _, ok := err.(errCredentialsMissingServerURL) - return ok -} - -// IsCredentialsMissingServerURLMessage checks for an -// errCredentialsMissingServerURL in the error message. -func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage -} - -// IsCredentialsMissingUsername returns true if the error -// was an errCredentialsMissingUsername. -func IsCredentialsMissingUsername(err error) bool { - _, ok := err.(errCredentialsMissingUsername) - return ok -} - -// IsCredentialsMissingUsernameMessage checks for an -// errCredentialsMissingUsername in the error message. -func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage -} diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254d7..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. 
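The error types being removed here come in matched pairs: a concrete type check for in-process use, and a string check for output that crossed a process boundary, where only the message survives. A short sketch of both:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	err := credentials.NewErrCredentialsNotFound()

	// In-process, the concrete error type can be checked directly.
	fmt.Println(credentials.IsErrCredentialsNotFound(err)) // true

	// Across a process boundary only the message survives, so the
	// string-based check is applied to the helper's trimmed output.
	out := "credentials not found in native keychain"
	fmt.Println(credentials.IsErrCredentialsNotFoundMessage(out)) // true
}
```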
- Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/version.go deleted file mode 100644 index 185e367961a..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package credentials - -// Version holds a string describing the current version -const Version = "0.6.4" diff --git a/ui/wasm/vendor/github.com/docker/docker/AUTHORS b/ui/wasm/vendor/github.com/docker/docker/AUTHORS deleted file mode 100644 index dffacff1120..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/AUTHORS +++ /dev/null @@ -1,2175 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Hnatiw -Aaron Huslage -Aaron L. Xu -Aaron Lehmann -Aaron Welch -Aaron.L.Xu -Abel Muiño -Abhijeet Kasurde -Abhinandan Prativadi -Abhinav Ajgaonkar -Abhishek Chanda -Abhishek Sharma -Abin Shahab -Adam Avilla -Adam Dobrawy -Adam Eijdenberg -Adam Kunk -Adam Miller -Adam Mills -Adam Pointer -Adam Singer -Adam Walz -Addam Hardy -Aditi Rajagopal -Aditya -Adnan Khan -Adolfo Ochagavía -Adria Casas -Adrian Moisey -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akash Gupta -Akhil Mohan -Akihiro Matsushima -Akihiro Suda -Akim Demaille -Akira Koyasu -Akshay Karle -Al Tobey -alambike -Alan Hoyle -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Albin Kerouanton -Alejandro González Hevia -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Chen -Alex Coventry -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Goodman -Alex Olshansky -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Midlash -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre Garnier -Alexandre González -Alexandre Jomin -Alexandru Sfirlogea -Alexei Margasov -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Alfred Landrum -Ali Dehghani -Alicia Lauerman -Alihan Demir -Allen Madsen -Allen Sun -almoehi -Alvaro Saurin -Alvin Deng -Alvin Richards -amangoel -Amen Belayneh -Amir Goldstein -Amit Bakshi -Amit Krishnan -Amit Shukla -Amr Gawish -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anca Iordache -Anchal Agrawal -Anda Xu -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Denisse Gómez -Andrea Luzzardi -Andrea Turli -Andreas Elvers -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrei Gherzan -Andrei Vagin -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew He -Andrew Hsu -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew McDonnell -Andrew Munsell -Andrew Pennebaker -Andrew Po -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Kolomentsev -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Anil Madhavapeddy -Ankit Jain -Ankush Agarwal -Anonmily -Anran Qiao -Anshul Pundir -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anthony Sottile -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antonis Kalipetis -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -Arash Deshmeh -ArikaChen -Arko Dasgupta -Arnaud Lefebvre -Arnaud Porterie -Arnaud Rebillout -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asad Saeeduddin -Asbjørn Enge -averagehuman -Avi Das -Avi Kivity -Avi Miller -Avi Vaid -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Bonnefoy -Ben Firshman -Ben Golub -Ben Gould -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benjamin Baker -Benjamin Boudreau -Benjamin Yolken -Benny Ng -Benoit Chesneau -Bernerd Schaefer -Bernhard M. Wiedemann -Bert Goethals -Bertrand Roussel -Bevisy Zhang -Bharath Thiruveedula -Bhiraj Butala -Bhumika Bayani -Bilal Amarni -Bill Wang -Bily Zhang -Bin Liu -Bingshen Wang -Blake Geno -Boaz Shuster -bobby abbott -Boqin Qin -Boris Pruessmann -Boshi Lian -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brett Randall -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Schwind -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Brielle Broder -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bruno Tavares -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Cao Weiwei -Carl Henrik Lunde -Carl Loa Odin -Carl X. Su -Carlo Mion -Carlos Alexandro Becker -Carlos de Paula -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Catalin Pirvu -Ce Gao -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander Govindarajan -Chanhun Jeong -Chao Wang -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charles Smith -Charlie Drage -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Chuanliang -Chen Hanxiao -Chen Min -Chen Mingjie -Chen Qiu -Cheng-mean Liu -Chengfei Shang -Chengguang Xu -chenyuzhu -Chetan Birajdar -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dias -Chris Dituri -Chris Fordham -Chris Gavin -Chris Gibson -Chris Khoo -Chris McKinnel -Chris McKinnel -Chris Price -Chris Seto -Chris Snow -Chris St. 
Pierre -Chris Stivers -Chris Swan -Chris Telfer -Chris Wahl -Chris Weyl -Chris White -Christian Berendt -Christian Brauner -Christian Böhme -Christian Muehlhaeuser -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -Christophe Mehay -Christophe Troestler -Christophe Vidal -Christopher Biscardi -Christopher Crone -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Norman -Chun Chen -Ciro S. Costa -Clayton Coleman -Clinton Kitson -Cody Roseborough -Coenraad Loubser -Colin Dunklau -Colin Hebert -Colin Panisset -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Corbin Coleman -Corey Farrell -Cory Forsyth -cressie176 -CrimsonGlory -Cristian Ariza -Cristian Staretu -cristiano balducci -Cristina Yenyxe Gonzalez Garcia -Cruceru Calin-Cristian -CUI Wei -Cyprian Gracz -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damian Smyth -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Feldman -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Williams -Dani Hodovic -Dani Louca -Daniel Antlinger -Daniel Black -Daniel Dao -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Grunwell -Daniel Helfand -Daniel Hiltgen -Daniel J Walsh -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Sweet -Daniel Von Fange -Daniel Watkins -Daniel X Moore -Daniel YC Lin -Daniel Zhang -Danny Berger -Danny Milosavljevic -Danny Yates -Danyal Khaliq -Darren Coxall -Darren Shepherd -Darren Stahl -Dattatraya Kumbhar -Davanum Srinivas -Dave Barboza -Dave Goodchild -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Chung -David Corking -David Cramer -David Currie -David Davis -David Dooling -David Gageot -David Gebler -David Glasser -David Lawrence -David Lechner -David M. Karr -David Mackey -David Mat -David Mcanulty -David McKay -David P Hilton -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Trott -David Wang <00107082@163.com> -David Williamson -David Xia -David Young -Davide Ceretti -Dawn Chen -dbdd -dcylabs -Debayan De -Deborah Gertrude Digges -deed02392 -Deep Debroy -Deng Guangxing -Deni Bertovic -Denis Defreyne -Denis Gladkikh -Denis Ollier -Dennis Chen -Dennis Chen -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devon Estes -Devvyn Murphy -Dharmit Shah -Dhawal Yogesh Bhanushali -Diego Romero -Diego Siqueira -Dieter Reuter -Dillon Dixon -Dima Stopel -Dimitri John Ledkov -Dimitris Mandalidis -Dimitris Rozakis -Dimitry Andric -Dinesh Subhraveti -Ding Fei -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitri Shuralyov -Dmitry Demeshchuk -Dmitry Gusev -Dmitry Kononenko -Dmitry Sharshakov -Dmitry Shyshkin -Dmitry Smirnov -Dmitry V. 
Krivenok -Dmitry Vorobev -Dolph Mathews -Dominic Tubach -Dominic Yin -Dominik Dingel -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donghwa Kim -Donovan Jones -Doron Podoleanu -Doug Davis -Doug MacEachern -Doug Tangren -Douglas Curtis -Dr Nic Williams -dragon788 -Dražen Lučanin -Drew Erny -Drew Hubl -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivin Giske Skaaren -Eivind Uggedal -Elan Ruusamäe -Elango Sivanandam -Elena Morozova -Eli Uriegas -Elias Faxö -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Davtyan -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -epeterso -Eric Barch -Eric Curtin -Eric G. Noriega -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Soderstrom -Eric Yang -Eric-Olivier Lamey -Erica Windisch -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik St. Martin -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Ethan Bell -Ethan Mosbaugh -Euan Kemp -Eugen Krizo -Eugene Yakubovich -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evelyn Xu -Everett Toews -Evgeniy Makhrov -Evgeny Shmarnev -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Ezra Silvera -Fabian Kramm -Fabian Lauer -Fabian Raetz -Fabiano Rosas -Fabio Falci -Fabio Kung -Fabio Rapposelli -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangming Fang -Fangyuan Gao <21551127@zju.edu.cn> -fanjiyun -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felipe Oliveira -Felipe Ruhland -Felix Abecassis -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Ruess -Felix Schindler -Feng Yan -Fengtu Wang -Ferenc Szabo -Fernando -Fero Volar -Ferran Rodenas -Filipe Brandenburger -Filipe Oliveira -Flavio Castelli -Flavio Crisciani -Florian -Florian Klein -Florian Maier -Florian Noeding -Florian Schmaus -Florian Weingarten -Florin Asavoaie -Florin Patan -fonglh -Foysal Iqbal -Francesc Campoy -Francesco Mari -Francis Chuang -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -frankyang -Fred Lifton -Frederick F. Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -Frieder Bluemle -Fu JinLin -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Linder -Gabriel Monroy -Gabriel Nicolas Avellaneda -Gaetan de Villele -Galen Sampson -Gang Qiao -Gareth Rushgrove -Garrett Barboza -Gary Schaetz -Gaurav -Gaurav Singh -Gaël PORTAY -Genki Takiuchi -GennadySpb -Geoffrey Bachelet -Geon Kim -George Kontridze -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gerwim Feiken -Ghislain Bourgeois -Giampaolo Mancini -Gianluca Borello -Gildas Cuisinier -Giovan Isa Musthofa -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Goldwyn Rodrigues -Gopikannan Venugopalsamy -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Millar -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Pflaum -Greg Stephens -Greg Thornton -Grzegorz Jaśkiewicz -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. 
Charmes -guoxiuyan -Guri -Gurjeet Singh -Guruprasad -Gustav Sinder -gwx296173 -Günter Zöchbauer -Haichao Yang -haikuoliu -Hakan Özler -Hamish Hutchings -Hannes Ljungberg -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harald Niesche -Harley Laue -Harold Cooper -Harrison Turton -Harry Zhang -Harshal Patil -Harshal Patil -He Simei -He Xiaoxi -He Xin -heartlock <21521209@zju.edu.cn> -Hector Castro -Helen Xie -Henning Sprang -Hiroshi Hatake -Hiroyuki Sasagawa -Hobofan -Hollie Teal -Hong Xu -Hongbin Lu -Hongxu Jia -Honza Pokorny -Hsing-Hui Hsu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -HuanHuan Ye -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -Hyzhou Zhy -Iago López Galeiras -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Campbell -Ian Chen -Ian Lee -Ian Main -Ian Philpot -Ian Truslove -Iavael -Icaro Seara -Ignacio Capurro -Igor Dolzhikov -Igor Karpovich -Iliana Weller -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -Ilya Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Innovimax -Isaac Dupree -Isabel Jimenez -Isaiah Grace -Isao Jonas -Iskander Sharipov -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -Ivan Markin -J Bruni -J. Nunn -Jack Danger Canty -Jack Laxson -Jacob Atzen -Jacob Edelman -Jacob Tomlinson -Jacob Vallejo -Jacob Wen -Jaime Cepeda -Jaivish Kothari -Jake Champlin -Jake Moshenko -Jake Sanders -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nesbitt -James Nugent -James Turnbull -James Watkins-Harvey -Jamie Hannaford -Jamshid Afshar -Jan Chren -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Jannick Fahlbusch -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason A. Donenfeld -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean Rouge -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Christophe Berthon -Jean-Paul Calderone -Jean-Pierre Huynh -Jean-Tiare Le Bigot -Jeeva S. Chelladhurai -Jeff Anderson -Jeff Hajewski -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Silberman -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Chambers -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeremy Yallop -Jeroen Franse -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -Jhon Honce -Ji.Zhilong -Jian Liao -Jian Zhang -Jiang Jinyang -Jie Luo -Jie Ma -Jihyun Hwang -Jilles Oldenbeuving -Jim Alateras -Jim Ehrismann -Jim Galasyn -Jim Minter -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -Jimmy Song -Jinsoo Park -Jintao Zhang -Jiri Appl -Jiri Popelka -Jiuyue Ma -Jiří Župka -Joao Fernandes -Joao Trindade -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johanan Lieberman -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Harris -John Howard -John Laswell -John Maguire -John Mulhausen -John OBrien III -John Starks -John Stephens -John Tims -John V. 
Martinez -John Warwick -John Willis -Jon Johnson -Jon Surrell -Jon Wedaman -Jonas Dohse -Jonas Heinrich -Jonas Pfenniger -Jonathan A. Schweder -Jonathan A. Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Choy -Jonathan Dowland -Jonathan Lebon -Jonathan Lomas -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Jonathan Stoppani -Jonh Wendell -Joni Sar -Joost Cassee -Jordan Arentsen -Jordan Jennings -Jordan Sissel -Jorge Marin -Jorit Kleine-Möllhoff -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Joseph Rothrock -Josh -Josh Bodah -Josh Bonczkowski -Josh Chorlton -Josh Eveleth -Josh Hawn -Josh Horwitz -Josh Poimboeuf -Josh Soref -Josh Wilson -Josiah Kiehl -José Tomás Albornoz -Joyce Jang -JP -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Kassar -Julien Maitrehenry -Julien Pervillé -Julien Pivotto -Julio Guerra -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justen Martin -Justin Cormack -Justin Force -Justin Menga -Justin Plock -Justin Simonelis -Justin Terry -Justyn Temme -Jyrki Puttonen -Jérémy Leherpeur -Jérôme Petazzoni -Jörg Thalheim -K. Heller -Kai Blin -Kai Qiang Wu (Kennan) -Kamil Domański -Kamjar Gerami -Kanstantsin Shautsou -Kara Alexandra -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Karthik Karanth -Karthik Nayak -Kasper Fabæch Brandt -Kate Heddleston -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -Kay Yan -kayrus -Kazuhiro Sera -Ke Li -Ke Xu -Kei Ohmura -Keith Hudgins -Keli Hu -Ken Cochrane -Ken Herner -Ken ICHIKAWA -Ken Reese -Kenfe-Mickaël Laventure -Kenjiro Nakayama -Kent Johnson -Kenta Tada -Kevin "qwazerty" Houdebert -Kevin Burke -Kevin Clark -Kevin Feyrer -Kevin J. Lynagh -Kevin Jing Qiu -Kevin Kern -Kevin Menard -Kevin Meredith -Kevin P. Kucharczyk -Kevin Parsons -Kevin Richardson -Kevin Shi -Kevin Wallace -Kevin Yap -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Gribov -Konstantin L -Konstantin Pelykh -Krasi Georgiev -Krasimir Georgiev -Kris-Mikael Krister -Kristian Haugene -Kristina Zabunova -Krystian Wojcicki -Kun Zhang -Kunal Kushwaha -Kunal Tyagi -Kyle Conroy -Kyle Linden -Kyle Wuolle -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. 
Damerow -Lars-Magnus Skog -Laszlo Meszaros -Laura Frank -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee Chao <932819864@qq.com> -Lee, Meng-Han -leeplay -Lei Gong -Lei Jitang -Len Weincier -Lennie -Leo Gallucci -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Daly -Lewis Marshall -Lewis Peckover -Li Yi -Liam Macgillavry -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -Liao Qingwei -Lifubang -Lihua Tang -Lily Guo -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -liwenqi -lixiaobing10051267 -Liz Zhang -LIZAO LI -Lizzie Dixon <_@lizzie.io> -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Lotus Fenn -Louis Delossantos -Louis Opter -Luca Favatella -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Lucas Chi -Lucas Molas -Lucas Silvestre -Luciano Mores -Luis Martínez de Bartolomé Izquierdo -Luiz Svoboda -Lukas Heeren -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -Luke Marsden -Lyn -Lynda O'Leary -Lénaïc Huard -Ma Müller -Ma Shimiao -Mabin -Madhan Raj Mookkandy -Madhav Puri -Madhu Venugopal -Mageee -Mahesh Tiyyagura -malnick -Malte Janduda -Manfred Touron -Manfred Zabarauskas -Manjunath A Kumatagi -Mansi Nahar -Manuel Meurer -Manuel Rüger -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcel Edmund Franke -Marcelo Horacio Fortino -Marcelo Salazar -Marco Hennings -Marcus Cobden -Marcus Farkas -Marcus Linke -Marcus Martins -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark Jeromin -Mark McGranaghan -Mark McKinstry -Mark Milstein -Mark Oates -Mark Parker -Mark West -Markan Patel -Marko Mikulicic -Marko Tibold -Markus Fix -Markus Kortlang -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Muzatko -Martin Redmond -Mary Anthony -Masahito Zembutsu -Masato Ohba -Masayuki Morita -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Champlon -Mathieu Le Marec - Pasquet -Mathieu Parent -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt Hoyle -Matt McCormick -Matt Moore -Matt Richardson -Matt Rickard -Matt Robenolt -Matt Schurenko -Matt Williams -Matthew Heon -Matthew Lapworth -Matthew Mayer -Matthew Mosesohn -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -Mattias Jernberg -Mauricio Garavaglia -mauriyouth -Max Harmathy -Max Shytikov -Maxim Fedchyshyn -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Maximiliano Maccanti -Maxwell -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mei ChunTao -Mengdi Gao -Mert Yazıcıoğlu -mgniu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Irwin -Michael Käufl -Michael Neale -Michael Nussbaum -Michael Prokop -Michael Scharf -Michael Spetsiotis -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michael Zhao -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minář -Michal Wieczorek -Michaël Pailloncy -Michał Czeraszkiewicz -Michał Gryko -Michiel de Jong -Mickaël Fortunato -Mickaël Remars -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Bush -Mike Casas -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Estes -Mike Gaffney -Mike Goelzer -Mike Leone -Mike Lundy -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miklos Szegedi -Milind Chawre -Miloslav Trmač -mingqing -Mingzhen Feng -Misty Stanley-Jones -Mitch Capper -Mizuki Urushida -mlarcher -Mohammad Banikazemi -Mohammad Nasirifar -Mohammed Aaqib Ansari -Mohit Soni -Moorthy RS -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mrfly -Mrunal Patel -Muayyad Alsadi -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nace Oroz -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Natasha Jarus -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Naveed Jamil -Neal McBurnett -Neil Horman -Neil Peterson -Nelson Chen -Neyazul Haque -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Adcock -Nick DeCoursin -Nick Irvine -Nick Neisen -Nick Parker -Nick Payne -Nick Russo -Nick Stenning -Nick Stinemates -NickrenREN -Nicola Kabar -Nicolas Borboën -Nicolas De Loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolas Sterchele -Nicolas V Castet -Nicolás Hock Isaza -Nigel Poulton -Nik Nyby -Nikhil Chawla -NikolaMandic -Nikolas Garofil -Nikolay Edigaryev -Nikolay Milovanov -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -Noah Meyerhans -Noah Treuhaft -NobodyOnSE -noducks -Nolan Darilek -Noriki Nakamura -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -Odin Ugedal -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -ohmystack -Ole Reifschneider -Oliver Neal -Oliver Reason -Olivier Gambier -Olle Jonsson -Olli Janatuinen -Olly Pomeroy -Omri Shiv -Oriol Francès -Oskar Niburski -Otto Kekäläinen -Ouyang Liduo -Ovidio Mallo -Panagiotis Moustafellos -Paolo G. 
Giarrusso -Pascal -Pascal Bach -Pascal Borreli -Pascal Hartig -Patrick Böänziger -Patrick Devine -Patrick Hemmer -Patrick Stapleton -Patrik Cyvoct -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Furtado -Paul Hammond -Paul Jimenez -Paul Kehrer -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Paulo Ribeiro -Pavel Lobashov -Pavel Matěja -Pavel Pletenev -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Pavol Vargovcik -Pawel Konczalski -Peeyush Gupta -Peggy Li -Pei Su -Peng Tao -Penghan Wang -Per Weijnitz -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Bücker -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Jaffe -Peter Kang -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Petr Švihlík -Phil -Phil Estes -Phil Spitler -Philip Alexander Etling -Philip Monroe -Philipp Gillé -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -phineas -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Dal-Pra -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Pradip Dhara -Prasanna Gautam -Pratik Karki -Prayag Verma -Priya Wadhwa -Projjol Banerji -Przemek Hejman -Pure White -pysqz -Qiang Huang -Qinglan Peng -qudongfang -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Radostin Stoyanov -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Raja Sami -Rajat Pandit -Rajdeep Dua -Ralf Sippl -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon Brooker -Ramon van Alteren -RaviTeja Pothana -Ray Tsang -ReadmeCritic -Recursive Madman -Reficul -Regan McCooey -Remi Rampin -Remy Suen -Renato Riccieri Santos Zannon -Renaud Gaubert -Rhys Hiltner -Ri Xu -Ricardo N Feliciano -Rich Moyse -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Mathie -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Gulewich -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Schneider -Robert Stern -Robert Terhaar -Robert Wallis -Robert Wang -Roberto G. Hashioka -Roberto Muñoz Fernández -Robin Naundorf -Robin Schneider -Robin Speekenbrink -Robin Thoni -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Rohit Kapur -Rojin George -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Dudin -Roman Mazur -Roman Strashkin -Ron Smits -Ron Williams -Rong Gao -Rong Zhang -Rongxiang Song -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Royce Remer -Rozhnov Alexandr -Rudolph Gottesheim -Rui Cao -Rui Lopes -Ruilin Li -Runshen Zhu -Russ Magee -Ryan Abrams -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan Liu -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Simmen -Ryan Stelly -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -Ryan Zhang -ryancooper7 -RyanDeng -Ryo Nakao -Rémy Greinhofer -s. 
rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sakeven Jiang -Salahuddin Khan -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sam Whited -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sandeep Bansal -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Sargun Dhillon -Sascha Andres -Sascha Grunert -SataQiu -Satnam Singh -Satoshi Amemiya -Satoshi Tagomori -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean Lee -Sean McIntyre -Sean OMeara -Sean P. Kane -Sean Rodman -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sergii Kabashniuk -Sergio Lopez -Serhat Gülçiçek -SeungUkLee -Sevki Hasirci -Shane Canon -Shane da Silva -Shaun Kaasten -shaunol -Shawn Landden -Shawn Siefkas -shawnhe -Shayne Wang -Shekhar Gulati -Sheng Yang -Shengbo Song -Shev Yan -Shih-Yuan Lee -Shijiang Wei -Shijun Qin -Shishir Mahajan -Shoubhik Bose -Shourya Sarcar -Shu-Wai Chow -shuai-z -Shukui Yang -Shuwei Hao -Sian Lerk Lau -Sidhartha Mani -sidharthamani -Silas Sewell -Silvan Jegen -Simão Reis -Simei He -Simon Barendse -Simon Eskildsen -Simon Ferquel -Simon Leinen -Simon Menke -Simon Taranto -Simon Vikstrom -Sindhu S -Sjoerd Langkemper -skanehira -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -Srinivasan Srivatsan -Staf Wagemakers -Stanislav Bondarenko -Stanislav Levin -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan S. -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephan Spindler -Stephen Benjamin -Stephen Crosby -Stephen Day -Stephen Drake -Stephen Rust -Steve Desmond -Steve Dougherty -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Erenst -Steven Hartland -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Stig Larsson -Subhajit Ghosh -Sujith Haridasan -Sun Gengze <690388648@qq.com> -Sun Jianbo -Sune Keller -Sunny Gogoi -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien HOUZÉ -Sébastien Luttringer -Sébastien Stormacq -Tabakhase -Tadej Janež -TAGOMORI Satoshi -tang0th -Tangi Colin -Tatsuki Sugiura -Tatsushi Inagaki -Taylan Isikdemir -Taylor Jones -Ted M. Young -Tehmasp Chaudhri -Tejaswini Duggaraju -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas Léveil -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Ti Zhou -Tianon Gravi -Tianyi Wang -Tibor Vass -Tiffany Jernigan -Tiffany Low -Till Wegmüller -Tim -Tim Bart -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Potter -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Tim Zju <21651152@zju.edu.cn> -timfeirg -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Munk -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Booth -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom Sweeney -Tom Wilkie -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonny Xu -Tony Abboud -Tony Daws -Tony Miller -toogley -Torstein Husebø -Tõnis Tiigi -Trace Andreason -tracylihui <793912329@qq.com> -Trapier Marshall -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -Trevor Sullivan -Trishna Guha -Tristan Carel -Troy Denton -Tycho Andersen -Tyler Brock -Tyler Brown -Tzu-Jung Lee -uhayate -Ulysse Carion -Umesh Yadav -Utz Bacher -vagrant -Vaidas Jablonskis -vanderliang -Velko Ivanov -Veres Lajos -Victor Algaze -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Vikram bir Singh -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Boulineau -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitaly Ostrosablin -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Pouzanov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vlastimil Zeman -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -Wang Chao -Wang Guoliang -Wang Jie -Wang Long -Wang Ping -Wang Xing -Wang Yuexiao -Wang Yumu <37442693@qq.com> -wanghuaiqing -Ward Vandewege -WarheadsSE -Wassim Dhif -Wayne Chang -Wayne Song -Weerasak Chongnguluam -Wei Fu -Wei Wu -Wei-Ting Kuo -weipeng -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenjun Tang -Wenkai Yin -wenlxie -Wentao Zhang -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wenzhi Liang -Wes Morgan -Wewang Xiaorenfine -Wiktor Kwapisiewicz -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Martin -William Riancho -William Thurston -Wilson Júnior -Wing-Kam Wong -WiseTrem -Wolfgang Powisch -Wonjun Kim -xamyzhao -Xian Chaobo -Xianglin Gao -Xianlu Bird -Xiao YongBiao -XiaoBing Jiang -Xiaodong Liu -Xiaodong Zhang -Xiaoxi He -Xiaoxu Chen -Xiaoyu Zhang -xichengliudui <1693291525@qq.com> -xiekeyang -Ximo Guanter Gonzálbez -Xinbo Weng -Xinfeng Liu -Xinzi Zhou -Xiuming Chen -Xuecong Liao -xuzhaokui -Yadnyawalkya Tale -Yahya -YAMADA Tsuyoshi -Yamasaki Masahide -Yan Feng -Yang Bai -Yang Pengfei -yangchenliang -Yanqiang Miao -Yao Zaiyong -Yash Murty -Yassine Tijani -Yasunori Mahata -Yazhong Liu -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongxin Li -Yongzhi Pan -Yosef Fertel -You-Sheng Yang (楊有勝) -youcai -Youcef YEKHLEF -Yu Changchun -Yu Chengxia -Yu Peng -Yu-Ju Hong -Yuan Sun -Yuanhong Peng -Yue Zhang -Yuhao Fang -Yuichiro Kaneko -Yunxiang Huang -Yurii Rashkovskii -Yusuf Tarık Günaydın -Yves Junqueira -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! 
-Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -ZhangHang -zhangxianwei -Zhenan Ye <21551168@zju.edu.cn> -zhenghenghuo -Zhenhai Gao -Zhenkun Bi -zhipengzuo -Zhou Hao -Zhoulin Xie -Zhu Guihua -Zhu Kunjia -Zhuoyun Wei -Ziheng Liu -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -Zou Yu -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -屈骏 -徐俊杰 -慕陶 -搏通 -黄艳红00139573 diff --git a/ui/wasm/vendor/github.com/docker/docker/LICENSE b/ui/wasm/vendor/github.com/docker/docker/LICENSE deleted file mode 100644 index 6d8d58fb676..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2018 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/docker/docker/NOTICE b/ui/wasm/vendor/github.com/docker/docker/NOTICE deleted file mode 100644 index 58b19b6d15b..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2017 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/creack/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/auth.go b/ui/wasm/vendor/github.com/docker/docker/api/types/auth.go deleted file mode 100644 index ddf15bb182d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/ui/wasm/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90e7..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/client.go b/ui/wasm/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 9c464b73e25..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,419 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string `json:"ID"` - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. 
-type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the unerlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. 
-type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -// ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. 
- QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. 
-type PluginCreateOptions struct { - RepoName string -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/configs.go b/ui/wasm/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index 3dd133a3a58..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,66 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} - -// NetworkListConfig stores the options available for listing networks -type NetworkListConfig struct { - // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here - Detailed bool - Verbose bool -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/config.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index f767195b94b..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,69 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. 
- // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). - Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_changes.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index 16dd5019eef..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// swagger:model ContainerChangeResponseItem -type ContainerChangeResponseItem struct { - - // Kind of change - // Required: true - Kind uint8 `json:"Kind"` - - // Path to file that has changed - // Required: true - Path string `json:"Path"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_create.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index d0c852f84d5..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_top.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 63381da3674..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process - // is an array of values corresponding to the titles. - // - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_update.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_update.go deleted file mode 100644 index c10f175ea82..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_update.go +++ /dev/null @@ -1,16 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerUpdateOKBody OK response to ContainerUpdate operation -// swagger:model ContainerUpdateOKBody -type ContainerUpdateOKBody struct { - - // warnings - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_wait.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_wait.go deleted file mode 100644 index 49e05ae6694..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// ---------------------------------------------------------------------------- -// Code generated by `swagger generate operation`. DO NOT EDIT. 
-// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - -// ContainerWaitOKBody OK response to ContainerWait operation -// swagger:model ContainerWaitOKBody -type ContainerWaitOKBody struct { - - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - - // Exit code of the container - // Required: true - StatusCode int64 `json:"StatusCode"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/host_config.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/host_config.go deleted file mode 100644 index 2d1cbaa9abd..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/host_config.go +++ /dev/null @@ -1,447 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" -) - -// CgroupnsMode represents the cgroup namespace mode of the container -type CgroupnsMode string - -// IsPrivate indicates whether the container uses its own private cgroup namespace -func (c CgroupnsMode) IsPrivate() bool { - return c == "private" -} - -// IsHost indicates whether the container shares the host's cgroup namespace -func (c CgroupnsMode) IsHost() bool { - return c == "host" -} - -// IsEmpty indicates whether the container cgroup namespace mode is unset -func (c CgroupnsMode) IsEmpty() bool { - return c == "" -} - -// Valid indicates whether the cgroup namespace mode is valid -func (c CgroupnsMode) Valid() bool { - return c.IsEmpty() || c.IsPrivate() || c.IsHost() -} - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == "private" -} - -// IsHost indicates whether the container shares the host's ipc namespace. 
-func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == "shareable" -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == "none" -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns. -func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses the a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. 
-func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceRequest represents a request for devices from a device driver. -// Used by GPU device drivers. -type DeviceRequest struct { - Driver string // Name of device driver - Count int // Number of devices to request (-1 = All) - DeviceIDs []string // List of device IDs as recognizable by the device driver - Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") - Options map[string]string // Options to pass onto the device driver -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy. -// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart of exiting with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. 
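Note: the namespace-mode types above (IpcMode, NetworkMode, PidMode, CgroupSpec) all encode a reference to another container as "container:<id>". A minimal, illustrative sketch of that convention, assuming the upstream github.com/docker/docker module rather than this removed vendored copy (the container names are hypothetical):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// "container:db" points the IPC namespace at the container named "db".
	ipc := container.IpcMode("container:db")
	fmt.Println(ipc.IsContainer(), ipc.Container()) // true db

	// NetworkMode uses the same encoding, exposed as ConnectedContainer.
	net := container.NetworkMode("container:web")
	fmt.Println(net.IsContainer(), net.ConnectedContainer()) // true web

	// Plain values such as "host" select a well-known mode instead.
	pid := container.PidMode("host")
	fmt.Println(pid.IsHost(), pid.Valid()) // true true
}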
This means the container will -// automatically restart unless user has put it to stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. -type LogMode string - -// Available logging modes -const ( - LogModeUnset = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DeviceRequests []DeviceRequest // List of device requests for device drivers - KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". 
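Note: RestartPolicy above is a plain name/retry-count pair whose Is* helpers interpret the name, and an empty name is equivalent to "no". An illustrative sketch, assuming the upstream module:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	rp := container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3}
	fmt.Println(rp.IsOnFailure()) // true

	// IsSame compares both the policy name and the retry count.
	fmt.Println(rp.IsSame(&container.RestartPolicy{Name: "on-failure", MaximumRetryCount: 3})) // true

	// The zero value behaves like the explicit "no" policy.
	var none container.RestartPolicy
	fmt.Println(none.IsNone()) // true
}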
-// Portable information *should* appear in Config. -type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other container - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed port for the container - ReadonlyRootfs bool // Is the container root filesystem in read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. 
default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index cf6fdf44026..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5bb1..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
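Note: NetworkName is defined once per platform via build constraints, so the same NetworkMode value can resolve differently per OS; the unix variant above treats "bridge" as the bridge network, while the Windows counterpart (which follows) maps "nat" to that role and never reports "host". A sketch of the Linux behavior, assuming the upstream module:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	modes := []container.NetworkMode{"bridge", "host", "none", "container:web", "mynet"}
	for _, m := range modes {
		fmt.Printf("%q -> %q\n", string(m), m.NetworkName())
	}
	// Linux build: "bridge" -> "bridge", "host" -> "host", "none" -> "none",
	// "container:web" -> "container", "mynet" -> "mynet" (user-defined).
}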
-func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsContainer() { - return "container" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/ui/wasm/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index cd8311f99cf..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/error_response.go b/ui/wasm/vendor/github.com/docker/docker/api/types/error_response.go deleted file mode 100644 index dc942d9d9ef..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/error_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ErrorResponse Represents an error. -// swagger:model ErrorResponse -type ErrorResponse struct { - - // The error message. - // Required: true - Message string `json:"message"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/error_response_ext.go b/ui/wasm/vendor/github.com/docker/docker/api/types/error_response_ext.go deleted file mode 100644 index f84f034cd54..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/error_response_ext.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// Error returns the error message -func (e ErrorResponse) Error() string { - return e.Message -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/filters/parse.go b/ui/wasm/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index 4bc91cffd6e..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,324 +0,0 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. 
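Note: WaitCondition above pairs with the SDK client's ContainerWait, which in the client version contemporaneous with ContainerWaitOKBody returns a status channel plus an error channel. An illustrative sketch under that assumption; the container name is hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}
	// Block until "my-container" (hypothetical name) leaves the running state.
	statusCh, errCh := cli.ContainerWait(context.Background(), "my-container", container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		log.Fatal(err)
	case status := <-statusCh:
		fmt.Println("exit code:", status.StatusCode)
	}
}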
-type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// Keys returns all the keys in list of Args -func (args Args) Keys() []string { - keys := make([]string, 0, len(args.fields)) - for k := range args.fields { - keys = append(keys, k) - } - return keys -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte{}, nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: do not use in any new code; use ToJSON instead -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. 
-func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] - // do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(args.fields[key]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { - return true - } - - fieldValues := args.fields[key] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] - return ok -} - -type invalidFilter string - -func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" -} - -func (invalidFilter) InvalidParameter() {} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { - if !accepted[name] { - return invalidFilter(name) - } - } - return nil -} - -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { - return nil - } - for v := range args.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -// Clone returns a copy of args. 
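Note: a usage sketch of the filters.Args API deleted above (NewArgs, Arg, Add, Get, Contains, ExactMatch, Validate), assuming the upstream module; the filter values are made up:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("label", "app=ui"))
	args.Add("name", "wasm")

	fmt.Println(args.Contains("name"))           // true
	fmt.Println(args.ExactMatch("name", "wasm")) // true
	fmt.Println(args.Get("label"))               // [app=ui]

	// Validate rejects any key outside the accepted set.
	err := args.Validate(map[string]bool{"label": true})
	fmt.Println(err) // Invalid filter 'name'
}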
-func (args Args) Clone() (newArgs Args) { - newArgs.fields = make(map[string]map[string]bool, len(args.fields)) - for k, m := range args.fields { - var mm map[string]bool - if m != nil { - mm = make(map[string]bool, len(m)) - for kk, v := range m { - mm[kk] = v - } - } - newArgs.fields[k] = mm - } - return newArgs -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/ui/wasm/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index 4d9bf1c62c8..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about a container's graph driver. -// swagger:model GraphDriverData -type GraphDriverData struct { - - // data - // Required: true - Data map[string]string `json:"Data"` - - // name - // Required: true - Name string `json:"Name"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/id_response.go b/ui/wasm/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b15..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/ui/wasm/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8e8..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/image_summary.go b/ui/wasm/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index e145b3dcfcd..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // containers - // Required: true - Containers int64 `json:"Containers"` - - // created - // Required: true - Created int64 `json:"Created"` - - // Id - // Required: true - ID string `json:"Id"` - - // labels - // Required: true - Labels map[string]string `json:"Labels"` - - // parent Id - // Required: true - ParentID string `json:"ParentId"` - - // repo digests - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // repo tags - // Required: true - RepoTags []string `json:"RepoTags"` - - // shared size - // Required: true - SharedSize int64 `json:"SharedSize"` - - // size - // Required: true - Size int64 `json:"Size"` - - // virtual size - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/mount/mount.go b/ui/wasm/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 443b8d07a9f..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,131 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. 
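Note: the Mount type above is effectively a tagged union over Type; Source is a host path for bind mounts, a volume name for volume mounts, and must be empty for tmpfs. An illustrative construction, assuming the upstream module (paths and names are made up):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	bind := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/home/user/src", // host path for bind mounts
		Target:   "/workspace",
		ReadOnly: true,
	}
	fmt.Printf("%+v\n", bind)

	vol := mount.Mount{
		Type:   mount.TypeVolume,
		Source: "cache", // volume name for volume mounts
		Target: "/cache",
	}
	fmt.Printf("%+v\n", vol)
}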
-type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` - NonRecursive bool `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. 
-} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/network/network.go b/ui/wasm/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 437b184c67b..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,126 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" -import ( - "github.com/docker/docker/api/types/filters" -) - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string // Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) - return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} - -var acceptedFilters = map[string]bool{ - "dangling": true, - "driver": true, - "id": true, - "label": true, - "name": true, - "scope": true, - "type": true, -} - -// ValidateFilters validates the list of filter args with the available filters. 
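Note: ValidateFilters (its body follows just below) checks network-list filters against the acceptedFilters set above. An illustrative call, assuming the upstream module:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	ok := filters.NewArgs(filters.Arg("driver", "bridge"))
	fmt.Println(network.ValidateFilters(ok)) // <nil>

	bad := filters.NewArgs(filters.Arg("foo", "bar"))
	fmt.Println(network.ValidateFilters(bad)) // Invalid filter 'foo'
}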
-func ValidateFilters(filter filters.Args) error { - return filter.Validate(acceptedFilters) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9ab0..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. -// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. 
- ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. -// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_device.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 56990106755..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_env.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2ebe..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e870..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_mount.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b5c..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_responses.go b/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad85..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t 
*PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/port.go b/ui/wasm/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744c6..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/ui/wasm/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e405..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/registry/registry.go b/ui/wasm/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 53e47084c8d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,119 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - v1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
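Note: PluginInterfaceType above round-trips through JSON as the single string "prefix.capability/version" rather than as an object, via the custom UnmarshalJSON/MarshalJSON pair. A round-trip sketch, assuming the upstream module:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	// The wire format is "prefix.capability/version".
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", t) // {Prefix:docker Capability:volumedriver Version:1.0}

	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
}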
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
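Note: NetIPNet above exists so that CIDR ranges marshal as plain JSON strings instead of net.IPNet's struct form. A round-trip sketch, assuming the upstream module:

package main

import (
	"encoding/json"
	"fmt"
	"net"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	_, cidr, _ := net.ParseCIDR("10.0.0.0/8")
	n := registry.NetIPNet(*cidr)

	out, _ := json.Marshal(&n) // marshals to the CIDR string
	fmt.Println(string(out))   // "10.0.0.0/8"

	var back registry.NetIPNet
	_ = json.Unmarshal(out, &back)
	fmt.Println(back.String()) // 10.0.0.0/8
}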
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/service_update_response.go b/ui/wasm/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1bb6..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/stats.go b/ui/wasm/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 20daebed14b..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). 
- // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. - SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. 
Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we don't `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is the ultimate struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON extends Stats with the container name and ID, and with per-network statistics. -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/ui/wasm/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc15..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of - // strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type.
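The Stats/PreCPUStats pair above is what makes a docker-stats-style CPU percentage computable from two consecutive samples. Below is a minimal sketch of that calculation, assuming the commonly described CLI formula (delta of container CPU over delta of system CPU, scaled by online CPUs); the function and its sample inputs are illustrative, not part of this package.

package main

import "fmt"

// cpuPercent computes a docker-stats-style CPU percentage from two samples.
// total/preTotal correspond to CPUUsage.TotalUsage, system/preSystem to
// CPUStats.SystemUsage, and onlineCPUs to CPUStats.OnlineCPUs.
func cpuPercent(total, preTotal, system, preSystem uint64, onlineCPUs uint32) float64 {
	cpuDelta := float64(total) - float64(preTotal)
	systemDelta := float64(system) - float64(preSystem)
	if cpuDelta <= 0 || systemDelta <= 0 || onlineCPUs == 0 {
		return 0
	}
	return (cpuDelta / systemDelta) * float64(onlineCPUs) * 100.0
}

func main() {
	// Hypothetical nanosecond counters from two consecutive samples.
	fmt.Printf("%.2f%%\n", cpuPercent(4_000_000, 3_000_000, 100_000_000, 90_000_000, 4)) // 40.00%
}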
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/common.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index ef020f458bd..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other ones. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is. -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuerSubject is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/config.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 16202ccce61..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReferenceRuntimeTarget is a target for a config specifying that it - // isn't mounted into the container but instead has some other purpose.
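The StrSlice decoder deleted above accepts either a bare JSON string or an array of strings. A self-contained round-trip, re-declaring the type locally so the sketch runs on its own:

package main

import (
	"encoding/json"
	"fmt"
)

// strSlice mirrors the vendored StrSlice: a []string that also accepts
// a single JSON string.
type strSlice []string

func (e *strSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil // keep any default value already present
	}
	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}
	*e = p
	return nil
}

func main() {
	var a, b strSlice
	_ = json.Unmarshal([]byte(`"sh"`), &a)        // single string
	_ = json.Unmarshal([]byte(`["sh","-c"]`), &b) // array of strings
	fmt.Println(a, b) // [sh] [sh -c]
}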
-type ConfigReferenceRuntimeTarget struct{} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget `json:",omitempty"` - Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` - ConfigID string - ConfigName string -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/container.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index af5e1c0bc27..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,80 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - Config string - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
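DNSConfig above maps directly onto resolv.conf directives (nameserver, search, options). A hypothetical rendering helper, my own rather than part of the vendored package, makes the correspondence concrete:

package main

import (
	"fmt"
	"strings"
)

// renderResolvConf turns swarm-style DNS settings into resolv.conf lines,
// one "nameserver" line per server, plus optional "search" and "options".
func renderResolvConf(nameservers, search, options []string) string {
	var b strings.Builder
	for _, ns := range nameservers {
		fmt.Fprintf(&b, "nameserver %s\n", ns)
	}
	if len(search) > 0 {
		fmt.Fprintf(&b, "search %s\n", strings.Join(search, " "))
	}
	if len(options) > 0 {
		fmt.Fprintf(&b, "options %s\n", strings.Join(options, " "))
	}
	return b.String()
}

func main() {
	fmt.Print(renderResolvConf(
		[]string{"10.0.0.2"},
		[]string{"example.internal"},
		[]string{"ndots:2"},
	))
}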
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` - Sysctls map[string]string `json:",omitempty"` - CapabilityAdd []string `json:",omitempty"` - CapabilityDrop []string `json:",omitempty"` - Ulimits []*units.Ulimit `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/network.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d1d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. - PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/node.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 1e30f5fa10d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. 
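NodeAvailability above is a plain string enum, so validating user input against it is a one-switch affair; a sketch (the helper name is mine):

package main

import (
	"fmt"
	"strings"
)

// parseAvailability validates user input against the NodeAvailability
// values defined above ("active", "pause", "drain").
func parseAvailability(s string) (string, error) {
	switch v := strings.ToLower(strings.TrimSpace(s)); v {
	case "active", "pause", "drain":
		return v, nil
	default:
		return "", fmt.Errorf("invalid availability %q: expected active, pause or drain", s)
	}
}

func main() {
	fmt.Println(parseAvailability(" Drain ")) // drain <nil>
	fmt.Println(parseAvailability("paused"))  // error
}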
-type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403ccff..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c31d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index e45045866a6..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,754 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: plugin.proto - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -func (m *PluginSpec) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + 
sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, - 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, - 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, - 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, - 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, - 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, - 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, - 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, - 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, - 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 
0xcc, 0xb3, 0x28, 0x8f, 0xd9, - 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, - 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, - 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, - 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, - 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, - 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 9ef169046b4..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; - repeated string env = 5; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/secret.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec981c..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/service.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index 6eb452d24d1..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,202 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. 
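The hand-written marshalling code above (encodeVarintPlugin and the Unmarshal loops) is built on protobuf's base-128 varint encoding. A standalone sketch of the same scheme, with a matching decoder:

package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint encoding,
// the same loop shape as encodeVarintPlugin above.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes one varint, returning the value and bytes consumed.
func uvarint(buf []byte) (uint64, int) {
	var v uint64
	for i, b := range buf {
		v |= uint64(b&0x7f) << (7 * uint(i))
		if b < 0x80 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	enc := putUvarint(nil, 300) // 300 encodes as 0xAC 0x02
	v, n := uvarint(enc)
	fmt.Printf("% x -> %d (%d bytes)\n", enc, v, n)
}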
-type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` - - // ServiceStatus is an optional, extra field indicating the number of - // desired and running tasks. It is provided primarily as a shortcut to - // calculating these values client-side, which otherwise would require - // listing all tasks for a service, an operation that could be - // computationally and network expensive. - ServiceStatus *ServiceStatus `json:",omitempty"` - - // JobStatus is the status of a Service which is in one of ReplicatedJob or - // GlobalJob modes. It is absent on Replicated and Global services. - JobStatus *JobStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` - ReplicatedJob *ReplicatedJob `json:",omitempty"` - GlobalJob *GlobalJob `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback paused. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback completed. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -// ReplicatedJob is a type of Service which executes a defined set of Tasks -// in parallel until the specified number of Tasks have succeeded. -type ReplicatedJob struct { - // MaxConcurrent indicates the maximum number of Tasks that should be - // executing simultaneously for this job at any given time. There may be - // fewer Tasks than MaxConcurrent executing simultaneously; for example, if - // there are fewer than MaxConcurrent tasks needed to reach - // TotalCompletions. - // - // If this field is empty, it will default to a max concurrency of 1.
- MaxConcurrent *uint64 `json:",omitempty"` - - // TotalCompletions is the total number of Tasks desired to run to - // completion. - // - // If this field is empty, the value of MaxConcurrent will be used. - TotalCompletions *uint64 `json:",omitempty"` -} - -// GlobalJob is the type of a Service which executes a Task on every Node - // matching the Service's placement constraints. These tasks run to completion - // and then exit. - // - // This type is deliberately empty. -type GlobalJob struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update fails. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string -} - -// ServiceStatus represents the number of running tasks in a service and the - // number of tasks desired to be running. -type ServiceStatus struct { - // RunningTasks is the number of tasks for the service actually in the - // Running state - RunningTasks uint64 - - // DesiredTasks is the number of tasks desired to be running by the - // service. For replicated services, this is the replica count. For global - // services, this is computed by taking the number of tasks with desired - // state of not-Shutdown. - DesiredTasks uint64 - - // CompletedTasks is the number of tasks in the state Completed, if this - // service is in ReplicatedJob or GlobalJob mode. This field must be - // cross-referenced with the service type, because the default value of 0 - // may mean that a service is not in a job mode, or it may mean that the - // job has yet to complete any tasks.
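Two of the rules spelled out in the comments above, ReplicatedJob's concurrency defaults and UpdateConfig's MaxFailureRatio trigger, compress into a couple of helpers. This is a sketch under my reading of those comments, not the actual orchestrator logic:

package main

import "fmt"

// effectiveJobParams resolves a replicated job's settings per the
// ReplicatedJob comments: a nil MaxConcurrent defaults to 1, and a nil
// TotalCompletions defaults to the resolved MaxConcurrent.
func effectiveJobParams(maxConcurrent, totalCompletions *uint64) (mc, tc uint64) {
	mc = 1
	if maxConcurrent != nil {
		mc = *maxConcurrent
	}
	tc = mc
	if totalCompletions != nil {
		tc = *totalCompletions
	}
	return mc, tc
}

// failureActionTriggered applies the MaxFailureRatio rule: failed tasks
// divided by updated tasks, compared against the configured ratio.
func failureActionTriggered(failed, updated uint64, maxFailureRatio float32) bool {
	return updated > 0 && float32(failed)/float32(updated) > maxFailureRatio
}

func main() {
	five := uint64(5)
	mc, tc := effectiveJobParams(&five, nil)
	fmt.Println(mc, tc)                             // 5 5
	fmt.Println(failureActionTriggered(2, 10, 0.1)) // true: 0.2 > 0.1
}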
- CompletedTasks uint64 -} - -// JobStatus is the status of a job-type service. -type JobStatus struct { - // JobIteration is a value increased each time a Job is executed, - // successfully or otherwise. "Executed", in this case, means the job as a - // whole has been started, not that an individual Task has been launched. A - // job is "Executed" when its ServiceSpec is updated. JobIteration can be - // used to disambiguate Tasks belonging to different executions of a job. - // - // Though JobIteration will increase with each subsequent execution, it may - // not necessarily increase by 1, and so JobIteration should not be used to - // keep track of the number of times a job has been executed. - JobIteration Version - - // LastExecution is the time that the job was last executed, as observed by - // Swarm manager. - LastExecution time.Time `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index b25f9996462..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,227 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" -) - -// ClusterInfo represents info about the cluster for outputting in "info". - // It contains the same information as "Swarm", but without the JoinTokens. -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool - DefaultAddrPool []string - SubnetSize uint32 - DataPathPort uint32 -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an effect on new tasks. Old tasks - // will continue to use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers' TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration.
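TaskHistoryRetentionLimit above packs three behaviours into one *int64: unset, negative (keep forever), or a cap. A helper making that explicit, under the assumption that nil simply leaves the engine default in place:

package main

import "fmt"

// retentionPolicy interprets TaskHistoryRetentionLimit under my reading of
// its comment: nil leaves the default in place, a negative value means
// completed/failed tasks are never removed, anything else is the cap.
func retentionPolicy(limit *int64, defaultLimit int64) (n int64, unlimited bool) {
	if limit == nil {
		return defaultLimit, false
	}
	if *limit < 0 {
		return 0, true
	}
	return *limit, false
}

func main() {
	neg := int64(-1)
	n, unlimited := retentionPolicy(&neg, 5)
	fmt.Println(n, unlimited) // 0 true
}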
-type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats. Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often agent should send heartbeats to - // dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate and key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - DataPathPort uint32 - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability - DefaultAddrPool []string - SubnetSize uint32 -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm.
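The one hard constraint RaftConfig documents above is that ElectionTick must exceed HeartbeatTick; a validation sketch (helper name and error text are mine):

package main

import (
	"errors"
	"fmt"
)

// validateRaftTicks enforces the invariant stated in the RaftConfig
// comments: ElectionTick must be greater than HeartbeatTick.
func validateRaftTicks(electionTick, heartbeatTick int) error {
	if heartbeatTick < 1 {
		return errors.New("HeartbeatTick must be at least 1")
	}
	if electionTick <= heartbeatTick {
		return fmt.Errorf("ElectionTick (%d) must be greater than HeartbeatTick (%d)",
			electionTick, heartbeatTick)
	}
	return nil
}

func main() {
	fmt.Println(validateRaftTicks(10, 1)) // <nil>
	fmt.Println(validateRaftTicks(1, 1))  // error
}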
-type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` - - Warnings []string `json:",omitempty"` -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/task.go b/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index a6f7ab7b5c7..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,206 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` - - // JobIteration is the JobIteration of the Service that this Task was - // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is - // used to determine which Tasks belong to which run of the job. 
This field - // is absent if the Service mode is Replicated or Global. - JobIteration *Version `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory) which can be advertised by a -// node and requested to be reserved for a task. -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// Limit describes limits on resources which can be requested by a task. -type Limit struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - Pids int64 `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Limit `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - MaxReplicas uint64 `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. 
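Placement.Platforms above acts as an allow-list, with an empty list switching the filter off. A sketch of that check with a local platform stand-in:

package main

import "fmt"

type platform struct{ Architecture, OS string }

// platformAllowed mirrors the Placement.Platforms rule quoted above: an
// empty list disables filtering, otherwise the node's platform must
// appear in the list.
func platformAllowed(allowed []platform, node platform) bool {
	if len(allowed) == 0 {
		return true
	}
	for _, p := range allowed {
		if p.Architecture == node.Architecture && p.OS == node.OS {
			return true
		}
	}
	return false
}

func main() {
	linuxAmd64 := platform{"amd64", "linux"}
	fmt.Println(platformAllowed(nil, linuxAmd64))                            // true: filter off
	fmt.Println(platformAllowed([]platform{{"arm64", "linux"}}, linuxAmd64)) // false
}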
- Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string - PID int - ExitCode int -} - -// PortStatus represents the port status of a task's host ports whose -// service has published host ports -type PortStatus struct { - Ports []PortConfig `json:",omitempty"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/types.go b/ui/wasm/vendor/github.com/docker/docker/api/types/types.go deleted file mode 100644 index e3a159912e2..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/types.go +++ /dev/null @@ -1,635 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// RootFS returns Image's RootFS description including the layer IDs. 
-type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Engine API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Variant string `json:",omitempty"` - Os string - OsVersion string `json:",omitempty"` - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - LastTagTime time.Time `json:",omitempty"` -} - -// Container contains response of Engine API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Engine API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. -type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerStats contains response of Engine API: -// GET "/stats" -type ContainerStats struct { - Body io.ReadCloser `json:"body"` - OSType string `json:"ostype"` -} - -// Ping contains response of Engine API: -// GET "/_ping" -type Ping struct { - APIVersion string - OSType string - Experimental bool - BuilderVersion BuilderVersion -} - -// ComponentVersion describes the version information for a specific component. -type ComponentVersion struct { - Name string - Version string - Details map[string]string `json:",omitempty"` -} - -// Version contains response of Engine API: -// GET "/version" -type Version struct { - Platform struct{ Name string } `json:",omitempty"` - Components []ComponentVersion `json:",omitempty"` - - // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility - - Version string - APIVersion string `json:"ApiVersion"` - MinAPIVersion string `json:"MinAPIVersion,omitempty"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. -type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
-} - -// Info contains response of Engine API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes - KernelMemoryTCP bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - PidsLimit bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - LoggingDriver string - CgroupDriver string - CgroupVersion string `json:",omitempty"` - NEventsListener int - KernelVersion string - OperatingSystem string - OSVersion string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - GenericResources []swarm.GenericResource - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool - Isolation container.Isolation - InitBinary string - ContainerdCommit Commit - RuncCommit Commit - InitCommit Commit - SecurityOptions []string - ProductLicense string `json:",omitempty"` - DefaultAddressPools []NetworkAddressPool `json:",omitempty"` - Warnings []string -} - -// KeyValue holds a key/value pair -type KeyValue struct { - Key, Value string -} - -// NetworkAddressPool is a temp struct used by Info struct -type NetworkAddressPool struct { - Base string - Size int -} - -// SecurityOpt contains the name and options of a security option -type SecurityOpt struct { - Name string - Options []KeyValue -} - -// DecodeSecurityOptions decodes a security options string slice to a type safe -// SecurityOpt -func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { - so := []SecurityOpt{} - for _, opt := range opts { - // support output from a < 1.13 docker daemon - if !strings.Contains(opt, "=") { - so = append(so, SecurityOpt{Name: opt}) - continue - } - secopt := SecurityOpt{} - split := strings.Split(opt, ",") - for _, s := range split { - kv := strings.SplitN(s, "=", 2) - if len(kv) != 2 { - return nil, fmt.Errorf("invalid security option %q", s) - } - if kv[0] == "" || kv[1] == "" { - return nil, errors.New("invalid empty security option") - } - if kv[0] == "name" { - secopt.Name = kv[1] - continue - } - secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) - } - so = append(so, secopt) - } - return so, nil -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by Info struct -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string - // List of Log plugins registered - Log []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields is part of ExecConfig in runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores container's running state -// it's part of ContainerJSONBase and will return by "inspect" command -type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. 
It's only used by the Docker Swarm standalone API -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Engine API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API - Name string - RestartCount int - Driver string - Platform string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is newly used struct along with MountPoint -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the api -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -// This is used for reporting the mountpoints in use by a container. 
-type MountPoint struct { - Type mount.Type `json:",omitempty"` - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation mount.Propagation -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. - CheckDuplicate bool - Driver string - Scope string - EnableIPv6 bool - IPAM *network.IPAM - Internal bool - Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. 
-type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` - - // This is exposed here only for internal use - // It is not currently supported to specify custom shim configs - Shim *ShimConfig `json:"-"` -} - -// ShimConfig is used by runtime to configure containerd shims -type ShimConfig struct { - Binary string - Opts interface{} -} - -// DiskUsage contains response of Engine API: -// GET "/system/df" -type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume - BuildCache []*BuildCache - BuilderSize int64 // deprecated -} - -// ContainersPruneReport contains the response for Engine API: -// POST "/containers/prune" -type ContainersPruneReport struct { - ContainersDeleted []string - SpaceReclaimed uint64 -} - -// VolumesPruneReport contains the response for Engine API: -// POST "/volumes/prune" -type VolumesPruneReport struct { - VolumesDeleted []string - SpaceReclaimed uint64 -} - -// ImagesPruneReport contains the response for Engine API: -// POST "/images/prune" -type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// NetworksPruneReport contains the response for Engine API: -// POST "/networks/prune" -type NetworksPruneReport struct { - NetworksDeleted []string -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. 
-type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record -type BuildCache struct { - ID string - Parent string - Type string - Description string - InUse bool - Shared bool - Size int64 - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - KeepStorage int64 - Filters filters.Args -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/versions/README.md b/ui/wasm/vendor/github.com/docker/docker/api/types/versions/README.md deleted file mode 100644 index 1ef911edb0f..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -## Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. - -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/versions/compare.go b/ui/wasm/vendor/github.com/docker/docker/api/types/versions/compare.go deleted file mode 100644 index 8ccb0aa92eb..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions // import "github.com/docker/docker/api/types/versions" - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. 
-func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/ui/wasm/vendor/github.com/docker/docker/api/types/volume.go b/ui/wasm/vendor/github.com/docker/docker/api/types/volume.go deleted file mode 100644 index c69b08448df..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/api/types/volume.go +++ /dev/null @@ -1,72 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Volume volume -// swagger:model Volume -type Volume struct { - - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - - // Name of the volume driver used by the volume. - // Required: true - Driver string `json:"Driver"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // Mount path of the volume on the host. - // Required: true - Mountpoint string `json:"Mountpoint"` - - // Name of the volume. - // Required: true - Name string `json:"Name"` - - // The driver specific options used when creating the volume. - // - // Required: true - Options map[string]string `json:"Options"` - - // The level at which the volume exists. Either `global` for cluster-wide, - // or `local` for machine level. - // - // Required: true - Scope string `json:"Scope"` - - // Low-level details about the volume, provided by the volume driver. - // Details are returned as a map with key/value pairs: - // `{"key":"value","key2":"value2"}`. - // - // The `Status` field is optional, and is omitted if the volume driver - // does not support this feature. - // - Status map[string]interface{} `json:"Status,omitempty"` - - // usage data - UsageData *VolumeUsageData `json:"UsageData,omitempty"` -} - -// VolumeUsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// -// swagger:model VolumeUsageData -type VolumeUsageData struct { - - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // - // Required: true - RefCount int64 `json:"RefCount"` - - // Amount of disk space used by the volume (in bytes). This information - // is only available for volumes created with the `"local"` volume - // driver. 
For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // - // Required: true - Size int64 `json:"Size"` -} diff --git a/ui/wasm/vendor/github.com/docker/docker/errdefs/defs.go b/ui/wasm/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index 61e7456b4eb..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/ui/wasm/vendor/github.com/docker/docker/errdefs/doc.go b/ui/wasm/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174fc1..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/ui/wasm/vendor/github.com/docker/docker/errdefs/helpers.go b/ui/wasm/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index fe06fb6f703..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,279 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error 
-} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/errdefs/http_helpers.go b/ui/wasm/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index 07552f1cc1d..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,191 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import ( - "fmt" - "net/http" - - containerderrors "github.com/containerd/containerd/errdefs" - "github.com/docker/distribution/registry/api/errcode" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// GetHTTPErrorStatusCode retrieves status code from error message. 
-func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - - // Stop right there - // Are you sure you should be adding a new error class here? Do one of the existing ones work? - - // Note that the below functions are already checking the error causal chain for matches. - switch { - case IsNotFound(err): - statusCode = http.StatusNotFound - case IsInvalidParameter(err): - statusCode = http.StatusBadRequest - case IsConflict(err): - statusCode = http.StatusConflict - case IsUnauthorized(err): - statusCode = http.StatusUnauthorized - case IsUnavailable(err): - statusCode = http.StatusServiceUnavailable - case IsForbidden(err): - statusCode = http.StatusForbidden - case IsNotModified(err): - statusCode = http.StatusNotModified - case IsNotImplemented(err): - statusCode = http.StatusNotImplemented - case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): - statusCode = http.StatusInternalServerError - default: - statusCode = statusCodeFromGRPCError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromContainerdError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - statusCode = statusCodeFromDistributionError(err) - if statusCode != http.StatusInternalServerError { - return statusCode - } - if e, ok := err.(causer); ok { - return GetHTTPErrorStatusCode(e.Cause()) - } - - logrus.WithFields(logrus.Fields{ - "module": "api", - "error_type": fmt.Sprintf("%T", err), - }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return err - } - switch statusCode { - case http.StatusNotFound: - err = NotFound(err) - case http.StatusBadRequest: - err = InvalidParameter(err) - case http.StatusConflict: - err = Conflict(err) - case http.StatusUnauthorized: - err = Unauthorized(err) - case http.StatusServiceUnavailable: - err = Unavailable(err) - case http.StatusForbidden: - err = Forbidden(err) - case http.StatusNotModified: - err = NotModified(err) - case http.StatusNotImplemented: - err = NotImplemented(err) - case http.StatusInternalServerError: - if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { - err = System(err) - } - default: - logrus.WithFields(logrus.Fields{ - "module": "api", - "status_code": fmt.Sprintf("%d", statusCode), - }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode) - - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - case statusCode >= 400 && statusCode < 500: - err = InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - err = System(err) - default: - err = Unknown(err) - } - } - return err -} - -// statusCodeFromGRPCError returns status code according to gRPC error -func statusCodeFromGRPCError(err error) int { - switch status.Code(err) { - case codes.InvalidArgument: // code 3 - return http.StatusBadRequest - case codes.NotFound: // code 5 - return http.StatusNotFound - case codes.AlreadyExists: // code 6 - return http.StatusConflict - case 
codes.PermissionDenied: // code 7 - return http.StatusForbidden - case codes.FailedPrecondition: // code 9 - return http.StatusBadRequest - case codes.Unauthenticated: // code 16 - return http.StatusUnauthorized - case codes.OutOfRange: // code 11 - return http.StatusBadRequest - case codes.Unimplemented: // code 12 - return http.StatusNotImplemented - case codes.Unavailable: // code 14 - return http.StatusServiceUnavailable - default: - // codes.Canceled(1) - // codes.Unknown(2) - // codes.DeadlineExceeded(4) - // codes.ResourceExhausted(8) - // codes.Aborted(10) - // codes.Internal(13) - // codes.DataLoss(15) - return http.StatusInternalServerError - } -} - -// statusCodeFromDistributionError returns status code according to registry errcode -// code is loosely based on errcode.ServeJSON() in docker/distribution -func statusCodeFromDistributionError(err error) int { - switch errs := err.(type) { - case errcode.Errors: - if len(errs) < 1 { - return http.StatusInternalServerError - } - if _, ok := errs[0].(errcode.ErrorCoder); ok { - return statusCodeFromDistributionError(errs[0]) - } - case errcode.ErrorCoder: - return errs.ErrorCode().Descriptor().HTTPStatusCode - } - return http.StatusInternalServerError -} - -// statusCodeFromContainerdError returns status code for containerd errors when -// consumed directly (not through gRPC) -func statusCodeFromContainerdError(err error) int { - switch { - case containerderrors.IsInvalidArgument(err): - return http.StatusBadRequest - case containerderrors.IsNotFound(err): - return http.StatusNotFound - case containerderrors.IsAlreadyExists(err): - return http.StatusConflict - case containerderrors.IsFailedPrecondition(err): - return http.StatusPreconditionFailed - case containerderrors.IsUnavailable(err): - return http.StatusServiceUnavailable - case containerderrors.IsNotImplemented(err): - return http.StatusNotImplemented - default: - return http.StatusInternalServerError - } -} diff --git a/ui/wasm/vendor/github.com/docker/docker/errdefs/is.go b/ui/wasm/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index 3abf07d0c35..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,107 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -type causer interface { - Cause() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := 
getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index 5e6310fdcd6..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,93 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" - "os" - "path/filepath" - "strings" -) - -// GetRuntimeDir returns XDG_RUNTIME_DIR. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { - return xdgRuntimeDir, nil - } - return "", errors.New("could not get XDG_RUNTIME_DIR") -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { - return xdgConfigHome, nil - } - home := os.Getenv("HOME") - if home == "" { - return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") - } - return filepath.Join(home, ".config"), nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 67ab9e9b31e..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetRuntimeDir is unsupported on non-linux system. -func GetRuntimeDir() (string, error) { - return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") -} - -// StickRuntimeDirContents is unsupported on non-linux system. -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") -} - -// GetDataHome is unsupported on non-linux system. -func GetDataHome() (string, error) { - return "", errors.New("homedir.GetDataHome() is not supported on this system") -} - -// GetConfigHome is unsupported on non-linux system. -func GetConfigHome() (string, error) { - return "", errors.New("homedir.GetConfigHome() is not supported on this system") -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go deleted file mode 100644 index 441bd727b60..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build !windows - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - "os/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.Current(); err == nil { - return u.HomeDir - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. 
-func GetShortcutString() string { - return "~" -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go deleted file mode 100644 index 2f81813b287..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294b8..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index 87514b643d7..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,187 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. 
-type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 534d66ac268..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,162 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. 
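The bytespipe.go removal above drops a producer/consumer pipe with backpressure. A minimal sketch of how it was typically used, assuming the pre-removal import path github.com/docker/docker/pkg/ioutils is still resolvable:

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close() // reads drain remaining data, then return io.EOF
	}()

	buf := make([]byte, 4)
	for {
		n, err := bp.Read(buf)
		fmt.Printf("%q\n", buf[:n])
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}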
-type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 1f657bd3dca..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,157 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "io" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. 
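The fswriters.go removal above drops the write-temp-then-rename pattern: AtomicWriteFile writes to a hidden temp file in the destination's directory, syncs, then renames over the target, so readers never observe a half-written file. A minimal sketch under the same import-path assumption (filename and contents are hypothetical):

package main

import "github.com/docker/docker/pkg/ioutils"

func main() {
	// Either the complete new file appears, or the old one is untouched.
	if err := ioutils.AtomicWriteFile("config.json", []byte(`{"debug":true}`), 0o600); err != nil {
		panic(err)
	}
}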
-func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index dc894f91314..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. 
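Among the readers.go helpers removed above, NewCancelReadCloser ties a ReadCloser to a context so a stuck read is abandoned on cancellation. A hedged sketch wrapping an HTTP response body (URL and timeout are illustrative):

package main

import (
	"context"
	"io"
	"net/http"
	"os"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := http.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	body := ioutils.NewCancelReadCloser(ctx, resp.Body)
	defer body.Close()

	// Returns early with ctx.Err() if the deadline fires mid-read.
	io.Copy(os.Stdout, body)
}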
-func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index ecaba2e36d2..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d182662..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. 
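The two TempDir variants removed above differ only on Windows, where the result is prefixed with `\\?\` (via longpath.AddPrefix) so paths may exceed MAX_PATH. A small sketch; on Windows this would print something like \\?\C:\Users\me\AppData\Local\Temp\build-123:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	dir, err := ioutils.TempDir("", "build-")
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}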
-func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 61c679497da..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/ui/wasm/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index cf8d04b1b20..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,283 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - units "github.com/docker/go-units" - "github.com/moby/term" - "github.com/morikuni/aec" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. -const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. 
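The writeflusher.go removal above drops the write-then-flush wrapper used for streaming output over HTTP. An http.ResponseWriter satisfies the flusher interface, so each chunk reaches the client promptly; a hedged sketch (handler path and port are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	http.HandleFunc("/stream", func(w http.ResponseWriter, r *http.Request) {
		wf := ioutils.NewWriteFlusher(w)
		defer wf.Close()
		for i := 0; i < 3; i++ {
			fmt.Fprintf(wf, "tick %d\n", i) // flushed immediately after the write
			time.Sleep(time.Second)
		}
	})
	http.ListenAndServe(":8080", nil)
}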
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` // deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` // deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. 
- Aux *json.RawMessage `json:"aux,omitempty"` -} - -func clearLine(out io.Writer) { - eraseMode := aec.EraseModes.All - cl := aec.EraseLine(eraseMode) - fmt.Fprint(out, cl) -} - -func cursorUp(out io.Writer, l uint) { - fmt.Fprint(out, aec.Up(l)) -} - -func cursorDown(out io.Writer, l uint) { - fmt.Fprint(out, aec.Down(l)) -} - -// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the -// entire current line when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - clearLine(out) - endl = "\r" - fmt.Fprint(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { // deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]uint) - ) - - for { - var diff uint - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = uint(len(ids)) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } - diff = uint(len(ids)) - line - if isTerminal { - cursorUp(out, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). 
- ids = make(map[string]uint) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - cursorDown(out, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/ui/wasm/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 4177affba24..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/README.md b/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd98..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index 5fe071d6284..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid // import "github.com/docker/docker/pkg/stringid" - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - "regexp" - "strconv" - "strings" -) - -const shortLen = 12 - -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -// GenerateRandomID returns a unique id. 
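The jsonmessage package removed above is the classic renderer for docker-style JSON progress streams, redrawing per-ID progress bars when stdout is a terminal. A minimal sketch that renders such a stream from stdin (e.g. a saved image-pull API response); github.com/moby/term was already a dependency of the removed code:

package main

import (
	"os"

	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/moby/term"
)

func main() {
	fd, isTerminal := term.GetFdInfo(os.Stdout)
	if err := jsonmessage.DisplayJSONMessagesStream(os.Stdin, os.Stdout, fd, isTerminal, nil); err != nil {
		panic(err)
	}
}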
-func GenerateRandomID() string { - b := make([]byte, 32) - for { - if _, err := rand.Read(b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/auth.go b/ui/wasm/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 2d0ecde2d47..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,247 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/http" - "net/url" - "strings" - "time" - - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // AuthClientID is used the ClientID used for the token server - AuthClientID = "docker" -) - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - var ( - endpointStr = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - modifiers = Headers(userAgent, nil) - authTransport = transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) 
- credentialAuthConfig = *authConfig - creds = loginCredentialStore{authConfig: &credentialAuthConfig} - ) - - logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr) - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - if !foundV2 { - err = fallbackError{err: err} - } - - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - } - - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) - - return &http.Client{ - Transport: tr, - Timeout: 15 * time.Second, - }, foundV2, nil - -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success return a -// challenge manager for the supported authentication types and -// whether v2 was confirmed by the response. 
If a response is received but -// cannot be interpreted a PingResponseError will be returned. -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { - var ( - foundV2 = false - v2Version = auth.APIVersion{ - Type: "registry", - Version: "2.0", - } - ) - - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest(http.MethodGet, endpointStr, nil) - if err != nil { - return nil, false, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, false, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) - for _, pingVersion := range versions { - if pingVersion == v2Version { - // The version header indicates we're definitely - // talking to a v2 registry. So don't allow future - // fallbacks to the v1 protocol. - - foundV2 = true - break - } - } - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/config.go b/ui/wasm/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index 54b83fa40aa..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,433 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "fmt" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. 
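The auth.go removal above includes the credential-lookup fallback: ResolveAuthConfig first tries the exact index key, then normalises legacy config keys through ConvertToHostname. A sketch with a hypothetical credentials map:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	registrytypes "github.com/docker/docker/api/types/registry"
	"github.com/docker/docker/registry"
)

func main() {
	// Legacy-style key with a scheme, as older config files stored it.
	authConfigs := map[string]types.AuthConfig{
		"https://myregistry.example.com": {Username: "me"},
	}
	index := &registrytypes.IndexInfo{Name: "myregistry.example.com"}

	ac := registry.ResolveAuthConfig(authConfigs, index)
	fmt.Println(ac.Username) // "me", matched via the ConvertToHostname fallback
}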
-type serviceConfig struct { - registrytypes.ServiceConfig -} - -const ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexHostname is the index hostname - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" -) - -var ( - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } - - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) - validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) - - // for mocking in unit tests - lookupIP = net.LookupIP -) - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - }, - } - if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { - return nil, err - } - if err := config.LoadMirrors(options.Mirrors); err != nil { - return nil, err - } - if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { - return nil, err - } - - return config, nil -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. -func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { - cidrs := map[string]*registrytypes.NetIPNet{} - hostnames := map[string]bool{} - - for _, r := range registries { - if _, err := ValidateIndexName(r); err != nil { - return err - } - if validateNoScheme(r) != nil { - return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) - } - - if _, ipnet, err := net.ParseCIDR(r); err == nil { - // Valid CIDR. - cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) - } else if err := validateHostPort(r); err == nil { - // Must be `host:port` if not CIDR. - hostnames[r] = true - } else { - return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) - } - } - - config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) - for _, c := range cidrs { - config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) - } - - config.AllowNondistributableArtifactsHostnames = make([]string, 0) - for h := range hostnames { - config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) - } - - return nil -} - -// LoadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. 
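For orientation, the option plumbing removed above is consumed roughly as follows. This is a non-runnable fragment from inside the registry package (newServiceConfig is unexported), with hypothetical option values:

// opts would come from daemon flags / daemon.json.
opts := ServiceOptions{
	Mirrors:            []string{"https://mirror.gcr.io"},
	InsecureRegistries: []string{"10.0.0.0/8", "registry.local:5000"},
}
config, err := newServiceConfig(opts)
if err != nil {
	// a malformed mirror or registry entry is rejected up front
}
_ = config.IndexConfigs["docker.io"] // official index entry, Secure: true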
-func (config *serviceConfig) LoadMirrors(mirrors []string) error { - mMap := map[string]struct{}{} - unique := []string{} - - for _, mirror := range mirrors { - m, err := ValidateMirror(mirror) - if err != nil { - return err - } - if _, exist := mMap[m]; !exist { - mMap[m] = struct{}{} - unique = append(unique, m) - } - } - - config.Mirrors = unique - - // Configure public registry since mirrors may have changed. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// LoadInsecureRegistries loads insecure registries to config -func (config *serviceConfig) LoadInsecureRegistries(registries []string) error { - // Localhost is by default considered as an insecure registry - // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker). - // - // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change - // daemon flags on boot2docker? - registries = append(registries, "127.0.0.0/8") - - // Store original InsecureRegistryCIDRs and IndexConfigs - // Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info. - originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs - originalIndexInfos := config.ServiceConfig.IndexConfigs - - config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0) - config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo) - -skip: - for _, r := range registries { - // validate insecure registry - if _, err := ValidateIndexName(r); err != nil { - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return err - } - if strings.HasPrefix(strings.ToLower(r), "http://") { - logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r) - r = r[7:] - } else if strings.HasPrefix(strings.ToLower(r), "https://") { - logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r) - r = r[8:] - } else if validateNoScheme(r) != nil { - // Insecure registry should not contain '://' - // before returning err, roll back to original data - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s should not contain '://'", r) - } - // Check if CIDR was passed to --insecure-registry - _, ipnet, err := net.ParseCIDR(r) - if err == nil { - // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip. - data := (*registrytypes.NetIPNet)(ipnet) - for _, value := range config.InsecureRegistryCIDRs { - if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() { - continue skip - } - } - // ipnet is not found, add it in config.InsecureRegistryCIDRs - config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data) - - } else { - if err := validateHostPort(r); err != nil { - config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs - config.ServiceConfig.IndexConfigs = originalIndexInfos - return fmt.Errorf("insecure registry %s is not valid: %v", r, err) - - } - // Assume `host:port` if not CIDR. 
- config.IndexConfigs[r] = ®istrytypes.IndexInfo{ - Name: r, - Mirrors: make([]string, 0), - Secure: false, - Official: false, - } - } - } - - // Configure public registry. - config.IndexConfigs[IndexName] = ®istrytypes.IndexInfo{ - Name: IndexName, - Mirrors: config.Mirrors, - Secure: true, - Official: true, - } - - return nil -} - -// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries -// that allow push of nondistributable artifacts. -// -// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP -// of the registry specified by hostname, true is returned. -// -// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If -// resolution fails, CIDR matching is not performed. -func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool { - for _, h := range config.AllowNondistributableArtifactsHostnames { - if h == hostname { - return true - } - } - - return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname) -} - -// isSecureIndex returns false if the provided indexName is part of the list of insecure registries -// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. -// -// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet. -// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered -// insecure. -// -// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name -// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained -// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element -// of insecureRegistries. -func isSecureIndex(config *serviceConfig, indexName string) bool { - // Check for configured index, first. This is needed in case isSecureIndex - // is called from anything besides newIndexInfo, in order to honor per-index configurations. - if index, ok := config.IndexConfigs[indexName]; ok { - return index.Secure - } - - return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName) -} - -// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`) -// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be -// resolved to IP addresses for matching. If resolution fails, false is returned. -func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool { - host, _, err := net.SplitHostPort(URLHost) - if err != nil { - // Assume URLHost is of the form `host` without the port and go on. - host = URLHost - } - - addrs, err := lookupIP(host) - if err != nil { - ip := net.ParseIP(host) - if ip != nil { - addrs = []net.IP{ip} - } - - // if ip == nil, then `host` is neither an IP nor it could be looked up, - // either because the index is unreachable, or because the index is behind an HTTP proxy. - // So, len(addrs) == 0 and we're not aborting. - } - - // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined. 
- for _, addr := range addrs { - for _, ipnet := range cidrs { - // check if the addr falls in the subnet - if (*net.IPNet)(ipnet).Contains(addr) { - return true - } - } - } - - return false -} - -// ValidateMirror validates an HTTP(S) registry mirror -func ValidateMirror(val string) (string, error) { - uri, err := url.Parse(val) - if err != nil { - return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val) - } - if uri.Scheme != "http" && uri.Scheme != "https" { - return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri) - } - if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" { - return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri) - } - if uri.User != nil { - // strip password from output - uri.User = url.UserPassword(uri.User.Username(), "xxxxx") - return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri) - } - return strings.TrimSuffix(val, "/") + "/", nil -} - -// ValidateIndexName validates an index name. -func ValidateIndexName(val string) (string, error) { - // TODO: upstream this to check to reference package - if val == "index.docker.io" { - val = "docker.io" - } - if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") { - return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val) - } - return val, nil -} - -func validateNoScheme(reposName string) error { - if strings.Contains(reposName, "://") { - // It cannot contain a scheme! - return ErrInvalidRepositoryName - } - return nil -} - -func validateHostPort(s string) error { - // Split host and port, and in case s can not be splitted, assume host only - host, port, err := net.SplitHostPort(s) - if err != nil { - host = s - port = "" - } - // If match against the `host:port` pattern fails, - // it might be `IPv6:port`, which will be captured by net.ParseIP(host) - if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil { - return fmt.Errorf("invalid host %q", host) - } - if port != "" { - v, err := strconv.Atoi(port) - if err != nil { - return err - } - if v < 0 || v > 65535 { - return fmt.Errorf("invalid port %q", port) - } - } - return nil -} - -// newIndexInfo returns IndexInfo configuration from indexName -func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) { - var err error - indexName, err = ValidateIndexName(indexName) - if err != nil { - return nil, err - } - - // Return any configured index info, first. - if index, ok := config.IndexConfigs[indexName]; ok { - return index, nil - } - - // Construct a non-configured index info. - index := ®istrytypes.IndexInfo{ - Name: indexName, - Mirrors: make([]string, 0), - Official: false, - } - index.Secure = isSecureIndex(config, indexName) - return index, nil -} - -// GetAuthConfigKey special-cases using the full index address of the official -// index as the AuthConfig key, and uses the (host)name[:port] for private indexes. 
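The CIDR test that isCIDRMatch/isSecureIndex (removed above) perform reduces to standard-library calls: strip any port from the host, resolve or parse it, and check each address against the configured subnets. A self-contained sketch with hypothetical values:

package main

import (
	"fmt"
	"net"
)

func main() {
	// One entry from a hypothetical insecure-registries list, in CIDR form.
	_, subnet, err := net.ParseCIDR("127.0.0.0/8")
	if err != nil {
		panic(err)
	}

	// URL.Host may carry a port; strip it the way the removed helper does.
	host, _, err := net.SplitHostPort("127.0.0.1:5000")
	if err != nil {
		host = "127.0.0.1:5000" // assume no port was present
	}

	if ip := net.ParseIP(host); ip != nil && subnet.Contains(ip) {
		fmt.Println("index is treated as insecure") // 127.0.0.1 falls in 127.0.0.0/8
	}
}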
-func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, reference.Domain(name)) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(reference.FamiliarName(name), '/') - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/config_unix.go b/ui/wasm/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index 8ee8fedfc15..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !windows - -package registry // import "github.com/docker/docker/registry" - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/rootless" -) - -// CertsDir is the directory where certificates are stored -func CertsDir() string { - d := "/etc/docker/certs.d" - - if rootless.RunningWithRootlessKit() { - configHome, err := homedir.GetConfigHome() - if err == nil { - d = filepath.Join(configHome, "docker/certs.d") - } - } - return d -} - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/config_windows.go b/ui/wasm/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 4ae1e07abfd..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "os" - "path/filepath" - "strings" -) - -// CertsDir is the directory where certificates are stored -func CertsDir() string { - return os.Getenv("programdata") + `\docker\certs.d` -} - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/endpoint_v1.go b/ui/wasm/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index db342d1412b..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,195 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. -func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { - endpoint := &V1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) - return endpoint -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. 
-func trimV1Address(address string) (string, error) { - var ( - chunks []string - apiVersionStr string - ) - - if strings.HasSuffix(address, "/") { - address = address[:len(address)-1] - } - - chunks = strings.Split(address, "/") - apiVersionStr = chunks[len(chunks)-1] - if apiVersionStr == "v1" { - return strings.Join(chunks[:len(chunks)-1], "/"), nil - } - - for k, v := range apiVersions { - if k != APIVersion1 && apiVersionStr == v { - return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) - } - } - - return address, nil -} - -func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { - address = "https://" + address - } - - address, err := trimV1Address(address) - if err != nil { - return nil, err - } - - uri, err := url.Parse(address) - if err != nil { - return nil, err - } - - endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) - - return endpoint, nil -} - -// Get the formatted URL for the root of this registry Endpoint -func (e *V1Endpoint) String() string { - return e.URL.String() + "/v1/" -} - -// Path returns a formatted string for the URL -// of this endpoint with the given path appended. -func (e *V1Endpoint) Path(path string) string { - return e.URL.String() + "/v1/" + path -} - -// Ping returns a PingResult which indicates whether the registry is standalone or not. -func (e *V1Endpoint) Ping() (PingResult, error) { - logrus.Debugf("attempting v1 ping for registry endpoint %s", e) - - if e.String() == IndexServer { - // Skip the check, we know this one is valid - // (and we never want to fallback to http in case of error) - return PingResult{Standalone: false}, nil - } - - req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil) - if err != nil { - return PingResult{Standalone: false}, err - } - - resp, err := e.client.Do(req) - if err != nil { - return PingResult{Standalone: false}, err - } - - defer resp.Body.Close() - - jsonString, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) - } - - // If the header is absent, we assume true for compatibility with earlier - // versions of the registry. default to true - info := PingResult{ - Standalone: true, - } - if err := json.Unmarshal(jsonString, &info); err != nil { - logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) - // don't stop here. Just assume sane defaults - } - if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { - logrus.Debugf("Registry version header: '%s'", hdr) - info.Version = hdr - } - logrus.Debugf("PingResult.Version: %q", info.Version) - - standalone := resp.Header.Get("X-Docker-Registry-Standalone") - logrus.Debugf("Registry standalone header: '%s'", standalone) - // Accepted values are "true" (case-insensitive) and "1". 
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/errors.go b/ui/wasm/vendor/github.com/docker/docker/registry/errors.go deleted file mode 100644 index 4906303efcf..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/errdefs" -) - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/registry.go b/ui/wasm/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 7a70bf28b51..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,199 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -// HostCertsDir returns the config directory for a specific host -func HostCertsDir(hostname string) (string, error) { - certsDir := CertsDir() - - hostDir := filepath.Join(certsDir, cleanPath(hostname)) - - return hostDir, nil -} - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir() != "" { - hostDir, err := HostCertsDir(hostname) - if err != nil { - return nil, err - } - - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. 
-func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %v", err) - } - tlsConfig.RootCAs = systemPool - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// Headers returns request modifiers with a User-Agent and metaHeaders -func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if len(via) != 0 && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
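For context, a brief usage sketch of the exported helpers deleted in this registry.go hunk — NewTransport (whose body follows just below), Headers, and HTTPClient — wired together the way the removed newV1Endpoint did; the user-agent string is hypothetical:

```go
package main

import (
	"fmt"

	dtransport "github.com/docker/distribution/registry/client/transport"
	"github.com/docker/docker/registry"
)

func main() {
	// nil tlsConfig => NewTransport falls back to tlsconfig.ServerDefault().
	base := registry.NewTransport(nil)

	// Headers returns request modifiers that set a User-Agent (and any
	// extra meta headers); HTTPClient wraps the transport with redirect
	// handling that forwards headers only to trusted locations.
	client := registry.HTTPClient(
		dtransport.NewTransport(base, registry.Headers("odo-ui/1.0", nil)...))
	fmt.Println(client.Timeout) // 0s: no client-level timeout is set
}
```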
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: direct.DialContext, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - return base -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/service.go b/ui/wasm/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index 3b08e39da2c..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,297 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) - LoadAllowNondistributableArtifacts([]string) error - LoadMirrors([]string) error - LoadInsecureRegistries([]string) error -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig - mu sync.Mutex -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) (*DefaultService, error) { - config, err := newServiceConfig(options) - - return &DefaultService{config: config}, err -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - s.mu.Lock() - defer s.mu.Unlock() - - servConfig := registrytypes.ServiceConfig{ - AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), - AllowNondistributableArtifactsHostnames: make([]string, 0), - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), - } - - // construct a new ServiceConfig which will not retrieve s.Config directly, - // and look up items in s.config with mu locked - servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) 
- servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) - servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) - - for key, value := range s.config.ServiceConfig.IndexConfigs { - servConfig.IndexConfigs[key] = value - } - - servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) - - return &servConfig -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. -func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadAllowNondistributableArtifacts(registries) -} - -// LoadMirrors loads registry mirrors for Service -func (s *DefaultService) LoadMirrors(mirrors []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadMirrors(mirrors) -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *DefaultService) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadInsecureRegistries(registries) -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. -func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { - // TODO Use ctx when searching for repositories - var registryHostName = IndexHostname - - if authConfig.ServerAddress != "" { - serverAddress := authConfig.ServerAddress - if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { - serverAddress = "https://" + serverAddress - } - u, err := url.Parse(serverAddress) - if err != nil { - return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err)) - } - registryHostName = u.Host - } - - // Lookup endpoints for authentication using "LookupPushEndpoints", which - // excludes mirrors to prevent sending credentials of the upstream registry - // to a mirror. - endpoints, err := s.LookupPushEndpoints(registryHostName) - if err != nil { - return "", "", errdefs.InvalidParameter(err) - } - - for _, endpoint := range endpoints { - status, token, err = loginV2(authConfig, endpoint, userAgent) - if err == nil { - return - } - if fErr, ok := err.(fallbackError); ok { - logrus.WithError(fErr.err).Infof("Error logging in to endpoint, trying next endpoint") - continue - } - - return "", "", err - } - - return "", "", err -} - -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, reposName - } - return nameParts[0], nameParts[1] -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. 
-func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - // TODO Use ctx when searching for repositories - if err := validateNoScheme(term); err != nil { - return nil, err - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.Lock() - index, err := newIndexInfo(s.config, indexName) - s.mu.Unlock() - - if err != nil { - return nil, err - } - - // *TODO: Search multiple indexes. - endpoint, err := NewV1Endpoint(index, userAgent, headers) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := Headers(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - // If pull "library/foo", it's stored locally under "foo" - remoteName = strings.TrimPrefix(remoteName, "library/") - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. 
-func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - AllowNondistributableArtifacts bool - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -// Deprecated: this function is deprecated and will be removed in a future update -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -// tlsConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.tlsConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. -// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.lookupV2Endpoints(hostname) -} - -// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. -// It gives preference to HTTPS over plain HTTP. Mirrors are not included. 
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - allEndpoints, err := s.lookupV2Endpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/service_v2.go b/ui/wasm/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 3e3a5b41ffb..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,79 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - tlsConfig := tlsconfig.ServerDefault() - if hostname == DefaultNamespace || hostname == IndexHostname { - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - ana := allowNondistributableArtifacts(s.config, hostname) - - tlsConfig, err = s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/session.go b/ui/wasm/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index d34dc1e58ab..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,227 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - // this is required for some certificates - _ "crypto/sha512" - "encoding/json" - "fmt" - "net/http" - "net/http/cookiejar" - "net/url" - "strings" - "sync" - - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - 
*types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. 
-func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. - client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") - } - // Have the AuthTransport send authentication, when logged in. 
- req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, &jsonmessage.JSONError{ - Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode), - Code: res.StatusCode, - } - } - result := new(registrytypes.SearchResults) - return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") -} diff --git a/ui/wasm/vendor/github.com/docker/docker/registry/types.go b/ui/wasm/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 28ed2bfa5e8..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// RepositoryData tracks the image list, list of endpoints for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. -type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". 
- Class string -} diff --git a/ui/wasm/vendor/github.com/docker/docker/rootless/rootless.go b/ui/wasm/vendor/github.com/docker/docker/rootless/rootless.go deleted file mode 100644 index 376d5263de9..00000000000 --- a/ui/wasm/vendor/github.com/docker/docker/rootless/rootless.go +++ /dev/null @@ -1,25 +0,0 @@ -package rootless // import "github.com/docker/docker/rootless" - -import ( - "os" - "sync" -) - -const ( - // RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy - RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy" -) - -var ( - runningWithRootlessKit bool - runningWithRootlessKitOnce sync.Once -) - -// RunningWithRootlessKit returns true if running under RootlessKit namespaces. -func RunningWithRootlessKit() bool { - runningWithRootlessKitOnce.Do(func() { - u := os.Getenv("ROOTLESSKIT_STATE_DIR") - runningWithRootlessKit = u != "" - }) - return runningWithRootlessKit -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/LICENSE b/ui/wasm/vendor/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc316..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/docker/go-connections/nat/nat.go b/ui/wasm/vendor/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bb7e4e33695..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,242 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
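For reference, a minimal sketch of the port helpers deleted in this nat.go hunk, using only NewPort, SplitProtoPort, and the Port accessors whose definitions appear in this removed file:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// NewPort validates the port number and normalizes it to "80/tcp" form.
	p, err := nat.NewPort("tcp", "80")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Proto(), p.Port(), p.Int()) // tcp 80 80

	// SplitProtoPort defaults the protocol to "tcp" when none is given.
	proto, port := nat.SplitProtoPort("8080")
	fmt.Println(proto, port) // tcp 8080
}
```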
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := ParsePort(portStr) - return port -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp", "sctp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -func splitParts(rawport string) (string, string, string) { - parts := strings.Split(rawport, ":") - n := len(parts) - containerport := parts[n-1] - - switch n { - case 1: - return "", "", containerport - case 2: - return "", parts[0], containerport - case 3: - return parts[0], parts[1], containerport - default: - return strings.Join(parts[:n-2], ":"), parts[n-2], containerport - } -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { - var proto string - rawIP, hostPort, containerPort := splitParts(rawPort) - proto, containerPort = SplitProtoPort(containerPort) - - // Strip [] from IPV6 addresses - ip, _, err := net.SplitHostPort(rawIP + ":") - if err != nil { - return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) - } - if ip != "" && net.ParseIP(ip) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", ip) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: ip, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/nat/parse.go b/ui/wasm/vendor/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 892adf8c667..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,57 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -// DEPRECATED: do not use, this function may be removed in a future version -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/nat/sort.go b/ui/wasm/vendor/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e31..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sort the port so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respected mapping. The ports -// will explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go deleted file mode 100644 index 1ca0965e06e..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build go1.7 - -package tlsconfig - -import ( - "crypto/x509" - "runtime" -) - -// SystemCertPool returns a copy of the system cert pool, -// returns an error if failed to load or empty pool on windows. -func SystemCertPool() (*x509.CertPool, error) { - certpool, err := x509.SystemCertPool() - if err != nil && runtime.GOOS == "windows" { - return x509.NewCertPool(), nil - } - return certpool, err -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go deleted file mode 100644 index 1ff81c333c3..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !go1.7 - -package tlsconfig - -import ( - "crypto/x509" -) - -// SystemCertPool returns an new empty cert pool, -// accessing system cert pool is supported in go 1.7 -func SystemCertPool() (*x509.CertPool, error) { - return x509.NewCertPool(), nil -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config.go b/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 0ef3fdcb469..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - - "github.com/pkg/errors" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. 
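Similarly, a small sketch of the removed nat.Sort helper from the sort.go hunk above, assuming only the signature and predicate semantics shown there (the predicate returns true when its first argument should sort before the second):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"22/tcp", "8080/tcp", "80/tcp"}

	// Sort orders the slice in place using a caller-supplied predicate,
	// here by ascending numeric port value.
	nat.Sort(ports, func(i, j nat.Port) bool { return i.Int() < j.Int() })
	fmt.Println(ports) // [22/tcp 80/tcp 8080/tcp]
}
```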
- CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType - // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS - // creds will include exclusively the roots in that CA file. If no CA file is provided, - // the system pool will be used. - ExclusiveRootPools bool - MinVersion uint16 - // If Passphrase is set, it will be used to decrypt a TLS private key - // if the key is encrypted - Passphrase string -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// allTLSVersions lists all the TLS versions and is used by the code that validates -// a uint16 value as a TLS version. -var allTLSVersions = map[uint16]struct{}{ - tls.VersionSSL30: {}, - tls.VersionTLS10: {}, - tls.VersionTLS11: {}, - tls.VersionTLS12: {}, -} - -// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Avoid fallback by default to SSL protocols < TLS1.2 - MinVersion: tls.VersionTLS12, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault(ops ...func(*tls.Config)) *tls.Config { - tlsconfig := &tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, - } - - for _, op := range ops { - op(tlsconfig) - } - - return tlsconfig -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - var ( - certPool *x509.CertPool - err error - ) - if exclusivePool { - certPool = x509.NewCertPool() - } else { - certPool, err = SystemCertPool() - if err != nil { - return nil, fmt.Errorf("failed to read system certificates: %v", err) - } - } - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - return certPool, nil -} - -// isValidMinVersion checks that the input value is a valid tls minimum version -func isValidMinVersion(version uint16) bool { - _, ok := allTLSVersions[version] - return ok -} - -// adjustMinVersion sets the MinVersion on `config`, the input configuration. -// It assumes the current MinVersion on the `config` is the lowest allowed. 
-func adjustMinVersion(options Options, config *tls.Config) error {
-	if options.MinVersion > 0 {
-		if !isValidMinVersion(options.MinVersion) {
-			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
-		}
-		if options.MinVersion < config.MinVersion {
-			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
-		}
-		config.MinVersion = options.MinVersion
-	}
-
-	return nil
-}
-
-// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
-// password when trying to decrypt a TLS private key
-func IsErrEncryptedKey(err error) bool {
-	return errors.Cause(err) == x509.IncorrectPasswordError
-}
-
-// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
-// If the private key is encrypted, 'passphrase' is used to decrypt the
-// private key.
-func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
-	// this section makes some small changes to code from notary/tuf/utils/x509.go
-	pemBlock, _ := pem.Decode(keyBytes)
-	if pemBlock == nil {
-		return nil, fmt.Errorf("no valid private key found")
-	}
-
-	var err error
-	if x509.IsEncryptedPEMBlock(pemBlock) {
-		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
-		if err != nil {
-			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
-		}
-		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
-	}
-
-	return keyBytes, nil
-}
-
-// getCert returns a Certificate from the CertFile and KeyFile in 'options';
-// if the key is encrypted, the Passphrase in 'options' will be used to
-// decrypt it.
-func getCert(options Options) ([]tls.Certificate, error) {
-	if options.CertFile == "" && options.KeyFile == "" {
-		return nil, nil
-	}
-
-	errMessage := "Could not load X509 key pair"
-
-	cert, err := ioutil.ReadFile(options.CertFile)
-	if err != nil {
-		return nil, errors.Wrap(err, errMessage)
-	}
-
-	prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
-	if err != nil {
-		return nil, errors.Wrap(err, errMessage)
-	}
-
-	prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
-	if err != nil {
-		return nil, errors.Wrap(err, errMessage)
-	}
-
-	tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
-	if err != nil {
-		return nil, errors.Wrap(err, errMessage)
-	}
-
-	return []tls.Certificate{tlsCert}, nil
-}
-
-// Client returns a TLS configuration meant to be used by a client.
-func Client(options Options) (*tls.Config, error) {
-	tlsConfig := ClientDefault()
-	tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
-	if !options.InsecureSkipVerify && options.CAFile != "" {
-		CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
-		if err != nil {
-			return nil, err
-		}
-		tlsConfig.RootCAs = CAs
-	}
-
-	tlsCerts, err := getCert(options)
-	if err != nil {
-		return nil, err
-	}
-	tlsConfig.Certificates = tlsCerts
-
-	if err := adjustMinVersion(options, tlsConfig); err != nil {
-		return nil, err
-	}
-
-	return tlsConfig, nil
-}
-
-// Server returns a TLS configuration meant to be used by a server.
-func Server(options Options) (*tls.Config, error) {
-	tlsConfig := ServerDefault()
-	tlsConfig.ClientAuth = options.ClientAuth
-	tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
-		}
-		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v.
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { - CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - - if err := adjustMinVersion(options, tlsConfig); err != nil { - return nil, err - } - - return tlsConfig, nil -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c0d0..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47cb2..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/ui/wasm/vendor/github.com/docker/go-metrics/CONTRIBUTING.md deleted file mode 100644 index b8a512c3665..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-metrics/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# Contributing - -## Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
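Closing out the tlsconfig removal above: the package's two entry points are `Client(Options)` and `Server(Options)`, both returning a ready-to-use `*tls.Config`. A minimal usage sketch follows; the file paths are placeholders for illustration, not values taken from this repository:

```go
package main

import (
	"log"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Build a client-side *tls.Config starting from ClientDefault():
	// TLS 1.2 minimum and the restricted client cipher suite list.
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "ca.pem",   // placeholder path
		CertFile: "cert.pem", // placeholder path
		KeyFile:  "key.pem",  // placeholder path
	})
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
	_ = client // use the client to talk to the TLS-protected endpoint
}
```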
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE b/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE deleted file mode 100644 index 8f3fee627a4..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE.docs b/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE.docs deleted file mode 100644 index e26cd4fc8ed..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-metrics/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. 
- - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. 
Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. 
The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." 
Except for the limited purpose of indicating
-that material is shared under a Creative Commons public license or as
-otherwise permitted by the Creative Commons policies published at
-creativecommons.org/policies, Creative Commons does not authorize the
-use of the trademark "Creative Commons" or any other trademark or logo
-of Creative Commons without its prior written consent including,
-without limitation, in connection with any unauthorized modifications
-to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For
-the avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/NOTICE b/ui/wasm/vendor/github.com/docker/go-metrics/NOTICE
deleted file mode 100644
index 8915f02773f..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/NOTICE
+++ /dev/null
@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/README.md b/ui/wasm/vendor/github.com/docker/go-metrics/README.md
deleted file mode 100644
index a9e947cb566..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
-
-This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
-
-## Best Practices
-
-This package is meant to be used for collecting metrics in Docker projects.
-It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
-If you have not already read the prometheus best practices around naming and labels, you can read the page [here](https://prometheus.io/docs/practices/naming/).
-
-The following are a few Docker-specific rules that will help you name and work with metrics in your project.
-
-1. Namespace and Subsystem
-
-This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
-
-```go
-ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
-	"version": dockerversion.Version,
-	"commit":  dockerversion.GitCommit,
-})
-```
-
-In the example above we are creating metrics for the Docker engine's daemon package.
-`engine` would be the namespace in this example, where `daemon` is the subsystem or package where we are collecting the metrics.
-
-A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
-
-2. Declaring your Metrics
-
-Try to keep all your metric declarations in one file.
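As a concrete illustration of that convention, a single `metrics.go` file might hold every declaration for a package. This is a hypothetical sketch; the `engine`/`daemon` namespace follows the README's own example, and the metric names are invented:

```go
package daemon

import metrics "github.com/docker/go-metrics"

// One namespace for the package, declared alongside every metric built on it.
var (
	ns = metrics.NewNamespace("engine", "daemon", nil)

	// Timer names get a "_seconds" suffix, counters a "_total" suffix.
	containerActions = ns.NewLabeledTimer("container_actions",
		"The time taken to process each container action", "action")
	events = ns.NewCounter("events", "The number of events processed")
)

func init() {
	metrics.Register(ns)
}
```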
-This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
-
-3. Use labels instead of multiple metrics
-
-Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
-If we wanted to collect timings on various container actions such as create, start, and delete, we can define one metric called `container_actions` and use labels to specify the type of action.
-
-
-```go
-containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
-```
-
-The last parameter is the label name or key.
-When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
-
-```go
-containerActions.WithValues("create").UpdateSince(start)
-```
-
-4. Always use a unit
-
-The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
-For a timer, the standard unit is seconds and a counter's standard unit is a total.
-For gauges you must provide the unit.
-This package provides a standard set of units for use within the Docker projects.
-
-```go
-Nanoseconds Unit = "nanoseconds"
-Seconds     Unit = "seconds"
-Bytes       Unit = "bytes"
-Total       Unit = "total"
-```
-
-If you need to use a unit but it is not defined in the package, please open a PR to add it; first, though, try to see if one of the already created units will work for your metric, e.g. seconds or nanoseconds rather than adding milliseconds.
-
-## Docs
-
-Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
-
-## HTTP Metrics
-
-To instrument an HTTP handler, you can wrap the code like this:
-
-```go
-namespace := metrics.NewNamespace("docker_distribution", "http", metrics.Labels{"handler": "your_http_handler_name"})
-httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
-metrics.Register(namespace)
-instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
-```
-Note: The `handler` label must be provided when a new namespace is created.
-
-## Additional Metrics
-
-Additional metrics are also defined here that are not available in the prometheus client.
-If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
-
-
-## Copyright and license
-
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/counter.go b/ui/wasm/vendor/github.com/docker/go-metrics/counter.go
deleted file mode 100644
index fe36316a45c..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/counter.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Counter is a metric that can only increment its current count
-type Counter interface {
-	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
-	//
-	// If len(vs) == 0, increments the counter by 1.
-	Inc(vs ...float64)
-}
-
-// LabeledCounter is a counter that must have labels populated before use.
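A quick sketch of the counter surface just described, assuming a namespace `ns` as in the README examples above; the metric names are illustrative:

```go
pulls := ns.NewCounter("image_pulls", "The number of images pulled")
pulls.Inc()       // no arguments: increment by 1
pulls.Inc(2, 0.5) // with arguments: increment by Sum(vs) == 2.5

// The labeled variant needs label values before it can count.
actions := ns.NewLabeledCounter("actions", "The number of actions run", "kind")
actions.WithValues("create").Inc()
```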
-type LabeledCounter interface {
-	WithValues(vs ...string) Counter
-}
-
-type labeledCounter struct {
-	pc *prometheus.CounterVec
-}
-
-func (lc *labeledCounter) WithValues(vs ...string) Counter {
-	return &counter{pc: lc.pc.WithLabelValues(vs...)}
-}
-
-func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
-	lc.pc.Describe(ch)
-}
-
-func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
-	lc.pc.Collect(ch)
-}
-
-type counter struct {
-	pc prometheus.Counter
-}
-
-func (c *counter) Inc(vs ...float64) {
-	if len(vs) == 0 {
-		c.pc.Inc()
-	}
-
-	c.pc.Add(sumFloat64(vs...))
-}
-
-func (c *counter) Describe(ch chan<- *prometheus.Desc) {
-	c.pc.Describe(ch)
-}
-
-func (c *counter) Collect(ch chan<- prometheus.Metric) {
-	c.pc.Collect(ch)
-}
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/docs.go b/ui/wasm/vendor/github.com/docker/go-metrics/docs.go
deleted file mode 100644
index 8fbdfc697d5..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/docs.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
-
-package metrics
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/gauge.go b/ui/wasm/vendor/github.com/docker/go-metrics/gauge.go
deleted file mode 100644
index 74296e87740..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/gauge.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Gauge is a metric that allows incrementing and decrementing a value
-type Gauge interface {
-	Inc(...float64)
-	Dec(...float64)
-
-	// Add adds the provided value to the gauge's current value
-	Add(float64)
-
-	// Set replaces the gauge's current value with the provided value
-	Set(float64)
-}
-
-// LabeledGauge describes a gauge that must have values populated before use.
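As with counters, a short hypothetical sketch of the gauge API; note that gauges, unlike counters and timers, take an explicit unit at creation:

```go
inFlight := ns.NewGauge("in_flight_ops", "The number of operations in flight", metrics.Total)
inFlight.Inc()  // one more operation started
inFlight.Dec()  // ...and one finished
inFlight.Set(0) // reset to a known value
```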
-type LabeledGauge interface { - WithValues(labels ...string) Gauge -} - -type labeledGauge struct { - pg *prometheus.GaugeVec -} - -func (lg *labeledGauge) WithValues(labels ...string) Gauge { - return &gauge{pg: lg.pg.WithLabelValues(labels...)} -} - -func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) { - lg.pg.Describe(c) -} - -func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) { - lg.pg.Collect(c) -} - -type gauge struct { - pg prometheus.Gauge -} - -func (g *gauge) Inc(vs ...float64) { - if len(vs) == 0 { - g.pg.Inc() - } - - g.Add(sumFloat64(vs...)) -} - -func (g *gauge) Dec(vs ...float64) { - if len(vs) == 0 { - g.pg.Dec() - } - - g.Add(-sumFloat64(vs...)) -} - -func (g *gauge) Add(v float64) { - g.pg.Add(v) -} - -func (g *gauge) Set(v float64) { - g.pg.Set(v) -} - -func (g *gauge) Describe(c chan<- *prometheus.Desc) { - g.pg.Describe(c) -} - -func (g *gauge) Collect(c chan<- prometheus.Metric) { - g.pg.Collect(c) -} diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/handler.go b/ui/wasm/vendor/github.com/docker/go-metrics/handler.go deleted file mode 100644 index 05601e9ecd2..00000000000 --- a/ui/wasm/vendor/github.com/docker/go-metrics/handler.go +++ /dev/null @@ -1,74 +0,0 @@ -package metrics - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -// HTTPHandlerOpts describes a set of configurable options of http metrics -type HTTPHandlerOpts struct { - DurationBuckets []float64 - RequestSizeBuckets []float64 - ResponseSizeBuckets []float64 -} - -const ( - InstrumentHandlerResponseSize = iota - InstrumentHandlerRequestSize - InstrumentHandlerDuration - InstrumentHandlerCounter - InstrumentHandlerInFlight -) - -type HTTPMetric struct { - prometheus.Collector - handlerType int -} - -var ( - defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} - defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G - defaultResponseSizeBuckets = defaultRequestSizeBuckets -) - -// Handler returns the global http.Handler that provides the prometheus -// metrics format on GET requests. This handler is no longer instrumented. 
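Before the function bodies that follow, a sketch of how the pieces of handler.go fit together: an uninstrumented scrape endpoint alongside an instrumented application handler. The handler name, namespace, and port are invented for the example, and the usual `net/http` and `log` imports are assumed:

```go
ns := metrics.NewNamespace("docker_distribution", "http", nil)
httpMetrics := ns.NewDefaultHttpMetrics("api")
metrics.Register(ns)

api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("ok"))
})

mux := http.NewServeMux()
mux.Handle("/metrics", metrics.Handler()) // scrape endpoint, not instrumented
mux.Handle("/", metrics.InstrumentHandler(httpMetrics, api))
log.Fatal(http.ListenAndServe(":8080", mux))
```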
-func Handler() http.Handler {
-	return promhttp.Handler()
-}
-
-func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
-	return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
-}
-
-func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
-	var handler http.Handler
-	handler = http.HandlerFunc(handlerFunc)
-	for _, metric := range metrics {
-		switch metric.handlerType {
-		case InstrumentHandlerResponseSize:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
-			}
-		case InstrumentHandlerRequestSize:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
-			}
-		case InstrumentHandlerDuration:
-			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
-				handler = promhttp.InstrumentHandlerDuration(collector, handler)
-			}
-		case InstrumentHandlerCounter:
-			if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
-				handler = promhttp.InstrumentHandlerCounter(collector, handler)
-			}
-		case InstrumentHandlerInFlight:
-			if collector, ok := metric.Collector.(prometheus.Gauge); ok {
-				handler = promhttp.InstrumentHandlerInFlight(collector, handler)
-			}
-		}
-	}
-	return handler.ServeHTTP
-}
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/helpers.go b/ui/wasm/vendor/github.com/docker/go-metrics/helpers.go
deleted file mode 100644
index 68b7f51b338..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/helpers.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package metrics
-
-func sumFloat64(vs ...float64) float64 {
-	var sum float64
-	for _, v := range vs {
-		sum += v
-	}
-
-	return sum
-}
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/namespace.go b/ui/wasm/vendor/github.com/docker/go-metrics/namespace.go
deleted file mode 100644
index 798315451a7..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/namespace.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/prometheus/client_golang/prometheus"
-)
-
-type Labels map[string]string
-
-// NewNamespace returns a namespace that is responsible for managing a collection of
-// metrics for a particular namespace and subsystem
-//
-// labels allows const labels to be added to all metrics created in this namespace
-// and are commonly used for data like application version and git commit
-func NewNamespace(name, subsystem string, labels Labels) *Namespace {
-	if labels == nil {
-		labels = make(map[string]string)
-	}
-	return &Namespace{
-		name:      name,
-		subsystem: subsystem,
-		labels:    labels,
-	}
-}
-
-// Namespace describes a set of metrics that share a namespace and subsystem.
-type Namespace struct {
-	name      string
-	subsystem string
-	labels    Labels
-	mu        sync.Mutex
-	metrics   []prometheus.Collector
-}
-
-// WithConstLabels returns a namespace with the provided set of labels merged
-// with the existing constant labels on the namespace.
-//
-// Only metrics created with the returned namespace will get the new constant
-// labels. The returned namespace must be registered separately.
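That registration caveat matters in practice. A brief sketch, with invented label values:

```go
base := metrics.NewNamespace("engine", "daemon", metrics.Labels{"version": "v1.0"})
metrics.Register(base)

// The derived namespace gets the merged constant labels, but it is a new
// collector: it must be registered on its own.
node := base.WithConstLabels(metrics.Labels{"node": "worker-1"})
metrics.Register(node)

restarts := node.NewCounter("restarts", "The number of container restarts")
restarts.Inc()
```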
-func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
-    n.mu.Lock()
-    ns := &Namespace{
-        name:      n.name,
-        subsystem: n.subsystem,
-        labels:    mergeLabels(n.labels, labels),
-    }
-    n.mu.Unlock()
-    return ns
-}
-
-func (n *Namespace) NewCounter(name, help string) Counter {
-    c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))}
-    n.Add(c)
-    return c
-}
-
-func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter {
-    c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)}
-    n.Add(c)
-    return c
-}
-
-func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts {
-    return prometheus.CounterOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        makeName(name, Total),
-        Help:        help,
-        ConstLabels: prometheus.Labels(n.labels),
-    }
-}
-
-func (n *Namespace) NewTimer(name, help string) Timer {
-    t := &timer{
-        m: prometheus.NewHistogram(n.newTimerOpts(name, help)),
-    }
-    n.Add(t)
-    return t
-}
-
-func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer {
-    t := &labeledTimer{
-        m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels),
-    }
-    n.Add(t)
-    return t
-}
-
-func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts {
-    return prometheus.HistogramOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        makeName(name, Seconds),
-        Help:        help,
-        ConstLabels: prometheus.Labels(n.labels),
-    }
-}
-
-func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge {
-    g := &gauge{
-        pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)),
-    }
-    n.Add(g)
-    return g
-}
-
-func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge {
-    g := &labeledGauge{
-        pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels),
-    }
-    n.Add(g)
-    return g
-}
-
-func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts {
-    return prometheus.GaugeOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        makeName(name, unit),
-        Help:        help,
-        ConstLabels: prometheus.Labels(n.labels),
-    }
-}
-
-func (n *Namespace) Describe(ch chan<- *prometheus.Desc) {
-    n.mu.Lock()
-    defer n.mu.Unlock()
-
-    for _, metric := range n.metrics {
-        metric.Describe(ch)
-    }
-}
-
-func (n *Namespace) Collect(ch chan<- prometheus.Metric) {
-    n.mu.Lock()
-    defer n.mu.Unlock()
-
-    for _, metric := range n.metrics {
-        metric.Collect(ch)
-    }
-}
-
-func (n *Namespace) Add(collector prometheus.Collector) {
-    n.mu.Lock()
-    n.metrics = append(n.metrics, collector)
-    n.mu.Unlock()
-}
-
-func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc {
-    name = makeName(name, unit)
-    namespace := n.name
-    if n.subsystem != "" {
-        namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem)
-    }
-    name = fmt.Sprintf("%s_%s", namespace, name)
-    return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels))
-}
-
-// mergeLabels merges two or more labels objects into a single map, favoring
-// the later labels.
-func mergeLabels(lbs ...Labels) Labels {
-    merged := make(Labels)
-
-    for _, target := range lbs {
-        for k, v := range target {
-            merged[k] = v
-        }
-    }
-
-    return merged
-}
-
-func makeName(name string, unit Unit) string {
-    if unit == "" {
-        return name
-    }
-
-    return fmt.Sprintf("%s_%s", name, unit)
-}
-
-func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
-    return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
-        DurationBuckets:     defaultDurationBuckets,
-        RequestSizeBuckets:  defaultResponseSizeBuckets,
-        ResponseSizeBuckets: defaultResponseSizeBuckets,
-    })
-}
-
-func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
-    return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
-        DurationBuckets:     durationBuckets,
-        RequestSizeBuckets:  requestSizeBuckets,
-        ResponseSizeBuckets: responseSizeBuckets,
-    })
-}
-
-func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
-    var httpMetrics []*HTTPMetric
-    inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
-    requestTotalMetric := n.NewRequestTotalMetric(handlerName)
-    requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
-    requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
-    responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
-    httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
-    return httpMetrics
-}
-
-func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
-    labels := prometheus.Labels(n.labels)
-    labels["handler"] = handlerName
-    metric := prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        "in_flight_requests",
-        Help:        "The in-flight HTTP requests",
-        ConstLabels: prometheus.Labels(labels),
-    })
-    httpMetric := &HTTPMetric{
-        Collector:   metric,
-        handlerType: InstrumentHandlerInFlight,
-    }
-    n.Add(httpMetric)
-    return httpMetric
-}
-
-func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
-    labels := prometheus.Labels(n.labels)
-    labels["handler"] = handlerName
-    metric := prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Namespace:   n.name,
-            Subsystem:   n.subsystem,
-            Name:        "requests_total",
-            Help:        "Total number of HTTP requests made.",
-            ConstLabels: prometheus.Labels(labels),
-        },
-        []string{"code", "method"},
-    )
-    httpMetric := &HTTPMetric{
-        Collector:   metric,
-        handlerType: InstrumentHandlerCounter,
-    }
-    n.Add(httpMetric)
-    return httpMetric
-}
-func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
-    if len(buckets) == 0 {
-        panic("DurationBuckets must be provided")
-    }
-    labels := prometheus.Labels(n.labels)
-    labels["handler"] = handlerName
-    opts := prometheus.HistogramOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        "request_duration_seconds",
-        Help:        "The HTTP request latencies in seconds.",
-        Buckets:     buckets,
-        ConstLabels: prometheus.Labels(labels),
-    }
-    metric := prometheus.NewHistogramVec(opts, []string{"method"})
-    httpMetric := &HTTPMetric{
-        Collector:   metric,
-        handlerType: InstrumentHandlerDuration,
-    }
-    n.Add(httpMetric)
-    return httpMetric
-}
-
-func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
-    if len(buckets) == 0 {
-        panic("RequestSizeBuckets must be provided")
-    }
-    labels := prometheus.Labels(n.labels)
-    labels["handler"] = handlerName
-    opts := prometheus.HistogramOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        "request_size_bytes",
-        Help:        "The HTTP request sizes in bytes.",
-        Buckets:     buckets,
-        ConstLabels: prometheus.Labels(labels),
-    }
-    metric := prometheus.NewHistogramVec(opts, []string{})
-    httpMetric := &HTTPMetric{
-        Collector:   metric,
-        handlerType: InstrumentHandlerRequestSize,
-    }
-    n.Add(httpMetric)
-    return httpMetric
-}
-
-func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
-    if len(buckets) == 0 {
-        panic("ResponseSizeBuckets must be provided")
-    }
-    labels := prometheus.Labels(n.labels)
-    labels["handler"] = handlerName
-    opts := prometheus.HistogramOpts{
-        Namespace:   n.name,
-        Subsystem:   n.subsystem,
-        Name:        "response_size_bytes",
-        Help:        "The HTTP response sizes in bytes.",
-        Buckets:     buckets,
-        ConstLabels: prometheus.Labels(labels),
-    }
-    metrics := prometheus.NewHistogramVec(opts, []string{})
-    httpMetric := &HTTPMetric{
-        Collector:   metrics,
-        handlerType: InstrumentHandlerResponseSize,
-    }
-    n.Add(httpMetric)
-    return httpMetric
-}
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/register.go b/ui/wasm/vendor/github.com/docker/go-metrics/register.go
deleted file mode 100644
index 708358df01d..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/register.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package metrics
-
-import "github.com/prometheus/client_golang/prometheus"
-
-// Register adds all the metrics in the provided namespace to the global
-// metrics registry
-func Register(n *Namespace) {
-    prometheus.MustRegister(n)
-}
-
-// Deregister removes all the metrics in the provided namespace from the
-// global metrics registry
-func Deregister(n *Namespace) {
-    prometheus.Unregister(n)
-}
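For context, the go-metrics API removed above was consumed by instrumented services roughly as follows. This is an illustrative sketch, not part of the diff; it assumes the NewNamespace constructor from the earlier portion of namespace.go and the standard promhttp handler from client_golang:

    package main

    import (
        "net/http"

        metrics "github.com/docker/go-metrics"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // A Namespace prefixes every metric it creates and carries const labels.
        ns := metrics.NewNamespace("odo", "api", nil)
        started := ns.NewCounter("started", "Number of API operations started")

        // Register adds every collector held by the namespace to the global registry.
        metrics.Register(ns)

        started.Inc(1)
        http.Handle("/metrics", promhttp.Handler())
    }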
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/timer.go b/ui/wasm/vendor/github.com/docker/go-metrics/timer.go
deleted file mode 100644
index 824c98739cf..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/timer.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package metrics
-
-import (
-    "time"
-
-    "github.com/prometheus/client_golang/prometheus"
-)
-
-// StartTimer begins a timer observation at the callsite. When the target
-// operation is completed, the caller should call the return done func().
-func StartTimer(timer Timer) (done func()) {
-    start := time.Now()
-    return func() {
-        timer.Update(time.Since(start))
-    }
-}
-
-// Timer is a metric that allows collecting the duration of an action in seconds
-type Timer interface {
-    // Update records an observation, duration, and converts to the target
-    // units.
-    Update(duration time.Duration)
-
-    // UpdateSince will add the duration from the provided starting time to the
-    // timer's summary with the precisions that was used in creation of the timer
-    UpdateSince(time.Time)
-}
-
-// LabeledTimer is a timer that must have label values populated before use.
-type LabeledTimer interface {
-    WithValues(labels ...string) *labeledTimerObserver
-}
-
-type labeledTimer struct {
-    m *prometheus.HistogramVec
-}
-
-type labeledTimerObserver struct {
-    m prometheus.Observer
-}
-
-func (lbo *labeledTimerObserver) Update(duration time.Duration) {
-    lbo.m.Observe(duration.Seconds())
-}
-
-func (lbo *labeledTimerObserver) UpdateSince(since time.Time) {
-    lbo.m.Observe(time.Since(since).Seconds())
-}
-
-func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver {
-    return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)}
-}
-
-func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) {
-    lt.m.Describe(c)
-}
-
-func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) {
-    lt.m.Collect(c)
-}
-
-type timer struct {
-    m prometheus.Observer
-}
-
-func (t *timer) Update(duration time.Duration) {
-    t.m.Observe(duration.Seconds())
-}
-
-func (t *timer) UpdateSince(since time.Time) {
-    t.m.Observe(time.Since(since).Seconds())
-}
-
-func (t *timer) Describe(c chan<- *prometheus.Desc) {
-    c <- t.m.(prometheus.Metric).Desc()
-}
-
-func (t *timer) Collect(c chan<- prometheus.Metric) {
-    // Are there any observers that don't implement Collector? It is really
-    // unclear what the point of the upstream change was, but we'll let this
-    // panic if we get an observer that doesn't implement collector. In this
-    // case, we should almost always see metricVec objects, so this should
-    // never panic.
-    t.m.(prometheus.Collector).Collect(c)
-}
diff --git a/ui/wasm/vendor/github.com/docker/go-metrics/unit.go b/ui/wasm/vendor/github.com/docker/go-metrics/unit.go
deleted file mode 100644
index c96622f9031..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-metrics/unit.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package metrics
-
-// Unit represents the type or precision of a metric that is appended to
-// the metrics fully qualified name
-type Unit string
-
-const (
-    Nanoseconds Unit = "nanoseconds"
-    Seconds     Unit = "seconds"
-    Bytes       Unit = "bytes"
-    Total       Unit = "total"
-)
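The StartTimer helper in the timer.go hunk above pairs a Timer with a done callback; a minimal usage sketch (the runBuild workload and the surrounding namespace are hypothetical):

    func timeBuild(ns *metrics.Namespace) {
        t := ns.NewTimer("build", "Time taken by the build step")
        done := metrics.StartTimer(t)
        defer done() // records time.Since(start) into the histogram on return
        runBuild()   // hypothetical workload being measured
    }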
diff --git a/ui/wasm/vendor/github.com/docker/go-units/CONTRIBUTING.md b/ui/wasm/vendor/github.com/docker/go-units/CONTRIBUTING.md
deleted file mode 100644
index 9ea86d784ec..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/CONTRIBUTING.md
+++ /dev/null
@@ -1,67 +0,0 @@
-# Contributing to go-units
-
-Want to hack on go-units? Awesome! Here are instructions to get you started.
-
-go-units is a part of the [Docker](https://www.docker.com) project, and follows
-the same rules and principles. If you're already familiar with the way
-Docker does things, you'll feel right at home.
-
-Otherwise, go read Docker's
-[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
-[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
-[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
-[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
-    Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions.)
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
diff --git a/ui/wasm/vendor/github.com/docker/go-units/LICENSE b/ui/wasm/vendor/github.com/docker/go-units/LICENSE
deleted file mode 100644
index b55b37bc316..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        https://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   Copyright 2015 Docker, Inc.
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       https://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/ui/wasm/vendor/github.com/docker/go-units/MAINTAINERS b/ui/wasm/vendor/github.com/docker/go-units/MAINTAINERS
deleted file mode 100644
index 4aac7c74110..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/MAINTAINERS
+++ /dev/null
@@ -1,46 +0,0 @@
-# go-units maintainers file
-#
-# This file describes who runs the docker/go-units project and how.
-# This is a living document - if you see something out of date or missing, speak up!
-#
-# It is structured to be consumable by both humans and programs.
-# To extract its contents programmatically, use any TOML-compliant parser.
-#
-# This file is compiled into the MAINTAINERS file in docker/opensource.
-#
-[Org]
-    [Org."Core maintainers"]
-        people = [
-            "akihirosuda",
-            "dnephin",
-            "thajeztah",
-            "vdemeester",
-        ]
-
-[people]
-
-# A reference list of all people associated with the project.
-# All other sections should refer to people by their canonical key
-# in the people section.
-
-    # ADD YOURSELF HERE IN ALPHABETICAL ORDER
-
-    [people.akihirosuda]
-    Name = "Akihiro Suda"
-    Email = "akihiro.suda.cz@hco.ntt.co.jp"
-    GitHub = "AkihiroSuda"
-
-    [people.dnephin]
-    Name = "Daniel Nephin"
-    Email = "dnephin@gmail.com"
-    GitHub = "dnephin"
-
-    [people.thajeztah]
-    Name = "Sebastiaan van Stijn"
-    Email = "github@gone.nl"
-    GitHub = "thaJeztah"
-
-    [people.vdemeester]
-    Name = "Vincent Demeester"
-    Email = "vincent@sbr.pm"
-    GitHub = "vdemeester"
\ No newline at end of file
diff --git a/ui/wasm/vendor/github.com/docker/go-units/README.md b/ui/wasm/vendor/github.com/docker/go-units/README.md
deleted file mode 100644
index 4f70a4e1345..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units)
-
-# Introduction
-
-go-units is a library to transform human friendly measurements into machine friendly values.
-
-## Usage
-
-See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
-
-## Copyright and license
-
-Copyright © 2015 Docker, Inc.
-
-go-units is licensed under the Apache License, Version 2.0.
-See [LICENSE](LICENSE) for the full text of the license.
diff --git a/ui/wasm/vendor/github.com/docker/go-units/circle.yml b/ui/wasm/vendor/github.com/docker/go-units/circle.yml
deleted file mode 100644
index af9d6055293..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/circle.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-dependencies:
-  post:
-    # install golint
-    - go get golang.org/x/lint/golint
-
-test:
-  pre:
-    # run analysis before tests
-    - go vet ./...
-    - test -z "$(golint ./... | tee /dev/stderr)"
-    - test -z "$(gofmt -s -l . | tee /dev/stderr)"
diff --git a/ui/wasm/vendor/github.com/docker/go-units/duration.go b/ui/wasm/vendor/github.com/docker/go-units/duration.go
deleted file mode 100644
index 48dd8744d43..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/duration.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package units provides helper function to parse and print size and time units
-// in human-readable format.
-package units
-
-import (
-    "fmt"
-    "time"
-)
-
-// HumanDuration returns a human-readable approximation of a duration
-// (eg. "About a minute", "4 hours ago", etc.).
-func HumanDuration(d time.Duration) string {
-    if seconds := int(d.Seconds()); seconds < 1 {
-        return "Less than a second"
-    } else if seconds == 1 {
-        return "1 second"
-    } else if seconds < 60 {
-        return fmt.Sprintf("%d seconds", seconds)
-    } else if minutes := int(d.Minutes()); minutes == 1 {
-        return "About a minute"
-    } else if minutes < 60 {
-        return fmt.Sprintf("%d minutes", minutes)
-    } else if hours := int(d.Hours() + 0.5); hours == 1 {
-        return "About an hour"
-    } else if hours < 48 {
-        return fmt.Sprintf("%d hours", hours)
-    } else if hours < 24*7*2 {
-        return fmt.Sprintf("%d days", hours/24)
-    } else if hours < 24*30*2 {
-        return fmt.Sprintf("%d weeks", hours/24/7)
-    } else if hours < 24*365*2 {
-        return fmt.Sprintf("%d months", hours/24/30)
-    }
-    return fmt.Sprintf("%d years", int(d.Hours())/24/365)
-}
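HumanDuration, deleted above, buckets a duration into a coarse human-readable label; a short sketch of its behavior, derived from the branches in the deleted code:

    fmt.Println(units.HumanDuration(47 * time.Second)) // "47 seconds"
    fmt.Println(units.HumanDuration(3 * time.Hour))    // "3 hours"
    fmt.Println(units.HumanDuration(72 * time.Hour))   // "3 days"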
diff --git a/ui/wasm/vendor/github.com/docker/go-units/size.go b/ui/wasm/vendor/github.com/docker/go-units/size.go
deleted file mode 100644
index 85f6ab07155..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/size.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package units
-
-import (
-    "fmt"
-    "regexp"
-    "strconv"
-    "strings"
-)
-
-// See: http://en.wikipedia.org/wiki/Binary_prefix
-const (
-    // Decimal
-
-    KB = 1000
-    MB = 1000 * KB
-    GB = 1000 * MB
-    TB = 1000 * GB
-    PB = 1000 * TB
-
-    // Binary
-
-    KiB = 1024
-    MiB = 1024 * KiB
-    GiB = 1024 * MiB
-    TiB = 1024 * GiB
-    PiB = 1024 * TiB
-)
-
-type unitMap map[string]int64
-
-var (
-    decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
-    binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
-    sizeRegex  = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`)
-)
-
-var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
-var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
-
-func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) {
-    i := 0
-    unitsLimit := len(_map) - 1
-    for size >= base && i < unitsLimit {
-        size = size / base
-        i++
-    }
-    return size, _map[i]
-}
-
-// CustomSize returns a human-readable approximation of a size
-// using custom format.
-func CustomSize(format string, size float64, base float64, _map []string) string {
-    size, unit := getSizeAndUnit(size, base, _map)
-    return fmt.Sprintf(format, size, unit)
-}
-
-// HumanSizeWithPrecision allows the size to be in any precision,
-// instead of 4 digit precision used in units.HumanSize.
-func HumanSizeWithPrecision(size float64, precision int) string {
-    size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
-    return fmt.Sprintf("%.*g%s", precision, size, unit)
-}
-
-// HumanSize returns a human-readable approximation of a size
-// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
-func HumanSize(size float64) string {
-    return HumanSizeWithPrecision(size, 4)
-}
-
-// BytesSize returns a human-readable size in bytes, kibibytes,
-// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
-func BytesSize(size float64) string {
-    return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
-}
-
-// FromHumanSize returns an integer from a human-readable specification of a
-// size using SI standard (eg. "44kB", "17MB").
-func FromHumanSize(size string) (int64, error) {
-    return parseSize(size, decimalMap)
-}
-
-// RAMInBytes parses a human-readable string representing an amount of RAM
-// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
-// returns the number of bytes, or -1 if the string is unparseable.
-// Units are case-insensitive, and the 'b' suffix is optional.
-func RAMInBytes(size string) (int64, error) {
-    return parseSize(size, binaryMap)
-}
-
-// Parses the human-readable size string into the amount it represents.
-func parseSize(sizeStr string, uMap unitMap) (int64, error) {
-    matches := sizeRegex.FindStringSubmatch(sizeStr)
-    if len(matches) != 4 {
-        return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
-    }
-
-    size, err := strconv.ParseFloat(matches[1], 64)
-    if err != nil {
-        return -1, err
-    }
-
-    unitPrefix := strings.ToLower(matches[3])
-    if mul, ok := uMap[unitPrefix]; ok {
-        size *= float64(mul)
-    }
-
-    return int64(size), nil
-}
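The size helpers deleted above come in decimal (base-1000) and binary (base-1024) flavors, for both formatting and parsing; the expected values below follow from the constants and regex in the deleted code:

    fmt.Println(units.HumanSize(1234567)) // "1.235MB" (decimal, 4 significant digits)
    fmt.Println(units.BytesSize(1234567)) // "1.177MiB" (binary)

    n, _ := units.RAMInBytes("2GiB")   // 2147483648 — binary multipliers
    m, _ := units.FromHumanSize("2GB") // 2000000000 — SI multipliers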
diff --git a/ui/wasm/vendor/github.com/docker/go-units/ulimit.go b/ui/wasm/vendor/github.com/docker/go-units/ulimit.go
deleted file mode 100644
index fca0400cc82..00000000000
--- a/ui/wasm/vendor/github.com/docker/go-units/ulimit.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package units
-
-import (
-    "fmt"
-    "strconv"
-    "strings"
-)
-
-// Ulimit is a human friendly version of Rlimit.
-type Ulimit struct {
-    Name string
-    Hard int64
-    Soft int64
-}
-
-// Rlimit specifies the resource limits, such as max open files.
-type Rlimit struct {
-    Type int    `json:"type,omitempty"`
-    Hard uint64 `json:"hard,omitempty"`
-    Soft uint64 `json:"soft,omitempty"`
-}
-
-const (
-    // magic numbers for making the syscall
-    // some of these are defined in the syscall package, but not all.
-    // Also since Windows client doesn't get access to the syscall package, need to
-    // define these here
-    rlimitAs         = 9
-    rlimitCore       = 4
-    rlimitCPU        = 0
-    rlimitData       = 2
-    rlimitFsize      = 1
-    rlimitLocks      = 10
-    rlimitMemlock    = 8
-    rlimitMsgqueue   = 12
-    rlimitNice       = 13
-    rlimitNofile     = 7
-    rlimitNproc      = 6
-    rlimitRss        = 5
-    rlimitRtprio     = 14
-    rlimitRttime     = 15
-    rlimitSigpending = 11
-    rlimitStack      = 3
-)
-
-var ulimitNameMapping = map[string]int{
-    //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
-    "core":       rlimitCore,
-    "cpu":        rlimitCPU,
-    "data":       rlimitData,
-    "fsize":      rlimitFsize,
-    "locks":      rlimitLocks,
-    "memlock":    rlimitMemlock,
-    "msgqueue":   rlimitMsgqueue,
-    "nice":       rlimitNice,
-    "nofile":     rlimitNofile,
-    "nproc":      rlimitNproc,
-    "rss":        rlimitRss,
-    "rtprio":     rlimitRtprio,
-    "rttime":     rlimitRttime,
-    "sigpending": rlimitSigpending,
-    "stack":      rlimitStack,
-}
-
-// ParseUlimit parses and returns a Ulimit from the specified string.
-func ParseUlimit(val string) (*Ulimit, error) {
-    parts := strings.SplitN(val, "=", 2)
-    if len(parts) != 2 {
-        return nil, fmt.Errorf("invalid ulimit argument: %s", val)
-    }
-
-    if _, exists := ulimitNameMapping[parts[0]]; !exists {
-        return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
-    }
-
-    var (
-        soft int64
-        hard = &soft // default to soft in case no hard was set
-        temp int64
-        err  error
-    )
-    switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
-    case 2:
-        temp, err = strconv.ParseInt(limitVals[1], 10, 64)
-        if err != nil {
-            return nil, err
-        }
-        hard = &temp
-        fallthrough
-    case 1:
-        soft, err = strconv.ParseInt(limitVals[0], 10, 64)
-        if err != nil {
-            return nil, err
-        }
-    default:
-        return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
-    }
-
-    if *hard != -1 {
-        if soft == -1 {
-            return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard)
-        }
-        if soft > *hard {
-            return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
-        }
-    }
-
-    return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
-}
-
-// GetRlimit returns the RLimit corresponding to Ulimit.
-func (u *Ulimit) GetRlimit() (*Rlimit, error) {
-    t, exists := ulimitNameMapping[u.Name]
-    if !exists {
-        return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
-    }
-
-    return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
-}
-
-func (u *Ulimit) String() string {
-    return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
-}
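ParseUlimit, removed above, accepts the name=soft[:hard] form used by docker-style CLIs; a minimal sketch:

    u, err := units.ParseUlimit("nofile=1024:2048")
    if err != nil {
        log.Fatal(err) // rejected names or malformed limits end up here
    }
    fmt.Println(u.String()) // "nofile=1024:2048"

    rl, _ := u.GetRlimit() // &Rlimit{Type: 7 /* rlimitNofile */, Soft: 1024, Hard: 2048}
    _ = rl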
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/LICENSE b/ui/wasm/vendor/github.com/emirpasic/gods/LICENSE
deleted file mode 100644
index e5e449b6eca..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/LICENSE
+++ /dev/null
@@ -1,41 +0,0 @@
-Copyright (c) 2015, Emir Pasic
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
-
-AVL Tree:
-
-Copyright (c) 2017 Benjamin Scher Purcell <benjapurcell@gmail.com>
-
-Permission to use, copy, modify, and distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/containers/containers.go b/ui/wasm/vendor/github.com/emirpasic/gods/containers/containers.go
deleted file mode 100644
index c35ab36d2c3..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/containers/containers.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package containers provides core interfaces and functions for data structures.
-//
-// Container is the base interface for all data structures to implement.
-//
-// Iterators provide stateful iterators.
-//
-// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
-//
-// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
-package containers
-
-import "github.com/emirpasic/gods/utils"
-
-// Container is base interface that all data structures implement.
-type Container interface {
-    Empty() bool
-    Size() int
-    Clear()
-    Values() []interface{}
-}
-
-// GetSortedValues returns sorted container's elements with respect to the passed comparator.
-// Does not effect the ordering of elements within the container.
-func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
-    values := container.Values()
-    if len(values) < 2 {
-        return values
-    }
-    utils.Sort(values, comparator)
-    return values
-}
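GetSortedValues in the containers.go hunk above works against any Container implementation; an illustrative sketch using the arraylist package, whose deletion appears further down in this diff:

    list := arraylist.New(3, 1, 2)
    sorted := containers.GetSortedValues(list, utils.IntComparator) // [1 2 3]
    // The list itself keeps its insertion order; only the returned slice is sorted.
    _ = sorted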
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/containers/enumerable.go b/ui/wasm/vendor/github.com/emirpasic/gods/containers/enumerable.go
deleted file mode 100644
index ac48b545315..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/containers/enumerable.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
-type EnumerableWithIndex interface {
-    // Each calls the given function once for each element, passing that element's index and value.
-    Each(func(index int, value interface{}))
-
-    // Map invokes the given function once for each element and returns a
-    // container containing the values returned by the given function.
-    // TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
-    // Map(func(index int, value interface{}) interface{}) Container
-
-    // Select returns a new container containing all elements for which the given function returns a true value.
-    // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-    // Select(func(index int, value interface{}) bool) Container
-
-    // Any passes each element of the container to the given function and
-    // returns true if the function ever returns true for any element.
-    Any(func(index int, value interface{}) bool) bool
-
-    // All passes each element of the container to the given function and
-    // returns true if the function returns true for all elements.
-    All(func(index int, value interface{}) bool) bool
-
-    // Find passes each element of the container to the given function and returns
-    // the first (index,value) for which the function is true or -1,nil otherwise
-    // if no element matches the criteria.
-    Find(func(index int, value interface{}) bool) (int, interface{})
-}
-
-// EnumerableWithKey provides functions for ordered containers whose values whose elements are key/value pairs.
-type EnumerableWithKey interface {
-    // Each calls the given function once for each element, passing that element's key and value.
-    Each(func(key interface{}, value interface{}))
-
-    // Map invokes the given function once for each element and returns a container
-    // containing the values returned by the given function as key/value pairs.
-    // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-    // Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
-
-    // Select returns a new container containing all elements for which the given function returns a true value.
-    // TODO need help on how to enforce this in containers (don't want to type assert when chaining)
-    // Select(func(key interface{}, value interface{}) bool) Container
-
-    // Any passes each element of the container to the given function and
-    // returns true if the function ever returns true for any element.
-    Any(func(key interface{}, value interface{}) bool) bool
-
-    // All passes each element of the container to the given function and
-    // returns true if the function returns true for all elements.
-    All(func(key interface{}, value interface{}) bool) bool
-
-    // Find passes each element of the container to the given function and returns
-    // the first (key,value) for which the function is true or nil,nil otherwise if no element
-    // matches the criteria.
-    Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
-}
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/containers/iterator.go b/ui/wasm/vendor/github.com/emirpasic/gods/containers/iterator.go
deleted file mode 100644
index f1a52a365ac..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/containers/iterator.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// IteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
-type IteratorWithIndex interface {
-    // Next moves the iterator to the next element and returns true if there was a next element in the container.
-    // If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-    // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-    // Modifies the state of the iterator.
-    Next() bool
-
-    // Value returns the current element's value.
-    // Does not modify the state of the iterator.
-    Value() interface{}
-
-    // Index returns the current element's index.
-    // Does not modify the state of the iterator.
-    Index() int
-
-    // Begin resets the iterator to its initial state (one-before-first)
-    // Call Next() to fetch the first element if any.
-    Begin()
-
-    // First moves the iterator to the first element and returns true if there was a first element in the container.
-    // If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-    // Modifies the state of the iterator.
-    First() bool
-}
-
-// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-type IteratorWithKey interface {
-    // Next moves the iterator to the next element and returns true if there was a next element in the container.
-    // If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
-    // If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-    // Modifies the state of the iterator.
-    Next() bool
-
-    // Value returns the current element's value.
-    // Does not modify the state of the iterator.
-    Value() interface{}
-
-    // Key returns the current element's key.
-    // Does not modify the state of the iterator.
-    Key() interface{}
-
-    // Begin resets the iterator to its initial state (one-before-first)
-    // Call Next() to fetch the first element if any.
-    Begin()
-
-    // First moves the iterator to the first element and returns true if there was a first element in the container.
-    // If First() returns true, then first element's key and value can be retrieved by Key() and Value().
-    // Modifies the state of the iterator.
-    First() bool
-}
-
-// ReverseIteratorWithIndex is stateful iterator for ordered containers whose values can be fetched by an index.
-//
-// Essentially it is the same as IteratorWithIndex, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-//
-// End() function to move the iterator past the last element (one-past-the-end).
-type ReverseIteratorWithIndex interface {
-    // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-    // If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-    // Modifies the state of the iterator.
-    Prev() bool
-
-    // End moves the iterator past the last element (one-past-the-end).
-    // Call Prev() to fetch the last element if any.
-    End()
-
-    // Last moves the iterator to the last element and returns true if there was a last element in the container.
-    // If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-    // Modifies the state of the iterator.
-    Last() bool
-
-    IteratorWithIndex
-}
-
-// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
-//
-// Essentially it is the same as IteratorWithKey, but provides additional:
-//
-// Prev() function to enable traversal in reverse
-//
-// Last() function to move the iterator to the last element.
-type ReverseIteratorWithKey interface {
-    // Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-    // If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
-    // Modifies the state of the iterator.
-    Prev() bool
-
-    // End moves the iterator past the last element (one-past-the-end).
-    // Call Prev() to fetch the last element if any.
-    End()
-
-    // Last moves the iterator to the last element and returns true if there was a last element in the container.
-    // If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
-    // Modifies the state of the iterator.
-    Last() bool
-
-    IteratorWithKey
-}
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/containers/serialization.go b/ui/wasm/vendor/github.com/emirpasic/gods/containers/serialization.go
deleted file mode 100644
index d7c90c83a05..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/containers/serialization.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package containers
-
-// JSONSerializer provides JSON serialization
-type JSONSerializer interface {
-    // ToJSON outputs the JSON representation of containers's elements.
-    ToJSON() ([]byte, error)
-}
-
-// JSONDeserializer provides JSON deserialization
-type JSONDeserializer interface {
-    // FromJSON populates containers's elements from the input JSON representation.
-    FromJSON([]byte) error
-}
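The JSONSerializer/JSONDeserializer interfaces above are implemented by the concrete containers; for example, with the arraylist package deleted below:

    list := arraylist.New("a", "b", "c")
    data, _ := list.ToJSON() // ["a","b","c"]

    restored := arraylist.New()
    _ = restored.FromJSON(data) // restored now holds "a", "b", "c"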
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
deleted file mode 100644
index bfedac9eef8..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package arraylist implements the array list.
-//
-// Structure is not thread safe.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package arraylist
-
-import (
-    "fmt"
-    "strings"
-
-    "github.com/emirpasic/gods/lists"
-    "github.com/emirpasic/gods/utils"
-)
-
-func assertListImplementation() {
-    var _ lists.List = (*List)(nil)
-}
-
-// List holds the elements in a slice
-type List struct {
-    elements []interface{}
-    size     int
-}
-
-const (
-    growthFactor = float32(2.0)  // growth by 100%
-    shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
-)
-
-// New instantiates a new list and adds the passed values, if any, to the list
-func New(values ...interface{}) *List {
-    list := &List{}
-    if len(values) > 0 {
-        list.Add(values...)
-    }
-    return list
-}
-
-// Add appends a value at the end of the list
-func (list *List) Add(values ...interface{}) {
-    list.growBy(len(values))
-    for _, value := range values {
-        list.elements[list.size] = value
-        list.size++
-    }
-}
-
-// Get returns the element at index.
-// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
-func (list *List) Get(index int) (interface{}, bool) {
-
-    if !list.withinRange(index) {
-        return nil, false
-    }
-
-    return list.elements[index], true
-}
-
-// Remove removes the element at the given index from the list.
-func (list *List) Remove(index int) {
-
-    if !list.withinRange(index) {
-        return
-    }
-
-    list.elements[index] = nil                                    // cleanup reference
-    copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
-    list.size--
-
-    list.shrink()
-}
-
-// Contains checks if elements (one or more) are present in the set.
-// All elements have to be present in the set for the method to return true.
-// Performance time complexity of n^2.
-// Returns true if no arguments are passed at all, i.e. set is always super-set of empty set.
-func (list *List) Contains(values ...interface{}) bool {
-
-    for _, searchValue := range values {
-        found := false
-        for _, element := range list.elements {
-            if element == searchValue {
-                found = true
-                break
-            }
-        }
-        if !found {
-            return false
-        }
-    }
-    return true
-}
-
-// Values returns all elements in the list.
-func (list *List) Values() []interface{} {
-    newElements := make([]interface{}, list.size, list.size)
-    copy(newElements, list.elements[:list.size])
-    return newElements
-}
-
-//IndexOf returns index of provided element
-func (list *List) IndexOf(value interface{}) int {
-    if list.size == 0 {
-        return -1
-    }
-    for index, element := range list.elements {
-        if element == value {
-            return index
-        }
-    }
-    return -1
-}
-
-// Empty returns true if list does not contain any elements.
-func (list *List) Empty() bool {
-    return list.size == 0
-}
-
-// Size returns number of elements within the list.
-func (list *List) Size() int {
-    return list.size
-}
-
-// Clear removes all elements from the list.
-func (list *List) Clear() {
-    list.size = 0
-    list.elements = []interface{}{}
-}
-
-// Sort sorts values (in-place) using.
-func (list *List) Sort(comparator utils.Comparator) {
-    if len(list.elements) < 2 {
-        return
-    }
-    utils.Sort(list.elements[:list.size], comparator)
-}
-
-// Swap swaps the two values at the specified positions.
-func (list *List) Swap(i, j int) {
-    if list.withinRange(i) && list.withinRange(j) {
-        list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
-    }
-}
-
-// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Insert(index int, values ...interface{}) {
-
-    if !list.withinRange(index) {
-        // Append
-        if index == list.size {
-            list.Add(values...)
-        }
-        return
-    }
-
-    l := len(values)
-    list.growBy(l)
-    list.size += l
-    copy(list.elements[index+l:], list.elements[index:list.size-l])
-    copy(list.elements[index:], values)
-}
-
-// Set the value at specified index
-// Does not do anything if position is negative or bigger than list's size
-// Note: position equal to list's size is valid, i.e. append.
-func (list *List) Set(index int, value interface{}) {
-
-    if !list.withinRange(index) {
-        // Append
-        if index == list.size {
-            list.Add(value)
-        }
-        return
-    }
-
-    list.elements[index] = value
-}
-
-// String returns a string representation of container
-func (list *List) String() string {
-    str := "ArrayList\n"
-    values := []string{}
-    for _, value := range list.elements[:list.size] {
-        values = append(values, fmt.Sprintf("%v", value))
-    }
-    str += strings.Join(values, ", ")
-    return str
-}
-
-// Check that the index is within bounds of the list
-func (list *List) withinRange(index int) bool {
-    return index >= 0 && index < list.size
-}
-
-func (list *List) resize(cap int) {
-    newElements := make([]interface{}, cap, cap)
-    copy(newElements, list.elements)
-    list.elements = newElements
-}
-
-// Expand the array if necessary, i.e. capacity will be reached if we add n elements
-func (list *List) growBy(n int) {
-    // When capacity is reached, grow by a factor of growthFactor and add number of elements
-    currentCapacity := cap(list.elements)
-    if list.size+n >= currentCapacity {
-        newCapacity := int(growthFactor * float32(currentCapacity+n))
-        list.resize(newCapacity)
-    }
-}
-
-// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
-func (list *List) shrink() {
-    if shrinkFactor == 0.0 {
-        return
-    }
-    // Shrink when size is at shrinkFactor * capacity
-    currentCapacity := cap(list.elements)
-    if list.size <= int(float32(currentCapacity)*shrinkFactor) {
-        list.resize(list.size)
-    }
-}
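A quick sketch of the arraylist API deleted above, with the expected results derived from the deleted implementation:

    list := arraylist.New()
    list.Add("a")
    list.Add("b", "c")
    v, _ := list.Get(0)               // "a"
    list.Insert(1, "x")               // a, x, b, c
    list.Remove(2)                    // a, x, c
    fmt.Println(list.Contains("x"))   // true
    fmt.Println(list.Size())          // 3
    list.Sort(utils.StringComparator) // a, c, x
    _ = v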
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
deleted file mode 100644
index b3a8738825c..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-func assertEnumerableImplementation() {
-    var _ containers.EnumerableWithIndex = (*List)(nil)
-}
-
-// Each calls the given function once for each element, passing that element's index and value.
-func (list *List) Each(f func(index int, value interface{})) {
-    iterator := list.Iterator()
-    for iterator.Next() {
-        f(iterator.Index(), iterator.Value())
-    }
-}
-
-// Map invokes the given function once for each element and returns a
-// container containing the values returned by the given function.
-func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
-    newList := &List{}
-    iterator := list.Iterator()
-    for iterator.Next() {
-        newList.Add(f(iterator.Index(), iterator.Value()))
-    }
-    return newList
-}
-
-// Select returns a new container containing all elements for which the given function returns a true value.
-func (list *List) Select(f func(index int, value interface{}) bool) *List {
-    newList := &List{}
-    iterator := list.Iterator()
-    for iterator.Next() {
-        if f(iterator.Index(), iterator.Value()) {
-            newList.Add(iterator.Value())
-        }
-    }
-    return newList
-}
-
-// Any passes each element of the collection to the given function and
-// returns true if the function ever returns true for any element.
-func (list *List) Any(f func(index int, value interface{}) bool) bool {
-    iterator := list.Iterator()
-    for iterator.Next() {
-        if f(iterator.Index(), iterator.Value()) {
-            return true
-        }
-    }
-    return false
-}
-
-// All passes each element of the collection to the given function and
-// returns true if the function returns true for all elements.
-func (list *List) All(f func(index int, value interface{}) bool) bool {
-    iterator := list.Iterator()
-    for iterator.Next() {
-        if !f(iterator.Index(), iterator.Value()) {
-            return false
-        }
-    }
-    return true
-}
-
-// Find passes each element of the container to the given function and returns
-// the first (index,value) for which the function is true or -1,nil otherwise
-// if no element matches the criteria.
-func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
-    iterator := list.Iterator()
-    for iterator.Next() {
-        if f(iterator.Index(), iterator.Value()) {
-            return iterator.Index(), iterator.Value()
-        }
-    }
-    return -1, nil
-}
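The enumerable helpers above give arraylist Ruby-style traversal; a short sketch:

    list := arraylist.New(1, 2, 3, 4)
    even := list.Select(func(i int, v interface{}) bool { return v.(int)%2 == 0 }) // 2, 4
    hasBig := list.Any(func(i int, v interface{}) bool { return v.(int) > 3 })     // true
    idx, val := list.Find(func(i int, v interface{}) bool { return v.(int) == 3 }) // 2, 3
    _, _, _, _ = even, hasBig, idx, val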
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
deleted file mode 100644
index 38a93f3a8f0..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import "github.com/emirpasic/gods/containers"
-
-func assertIteratorImplementation() {
-    var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
-}
-
-// Iterator holding the iterator's state
-type Iterator struct {
-    list  *List
-    index int
-}
-
-// Iterator returns a stateful iterator whose values can be fetched by an index.
-func (list *List) Iterator() Iterator {
-    return Iterator{list: list, index: -1}
-}
-
-// Next moves the iterator to the next element and returns true if there was a next element in the container.
-// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
-// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
-// Modifies the state of the iterator.
-func (iterator *Iterator) Next() bool {
-    if iterator.index < iterator.list.size {
-        iterator.index++
-    }
-    return iterator.list.withinRange(iterator.index)
-}
-
-// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
-// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Prev() bool {
-    if iterator.index >= 0 {
-        iterator.index--
-    }
-    return iterator.list.withinRange(iterator.index)
-}
-
-// Value returns the current element's value.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Value() interface{} {
-    return iterator.list.elements[iterator.index]
-}
-
-// Index returns the current element's index.
-// Does not modify the state of the iterator.
-func (iterator *Iterator) Index() int {
-    return iterator.index
-}
-
-// Begin resets the iterator to its initial state (one-before-first)
-// Call Next() to fetch the first element if any.
-func (iterator *Iterator) Begin() {
-    iterator.index = -1
-}
-
-// End moves the iterator past the last element (one-past-the-end).
-// Call Prev() to fetch the last element if any.
-func (iterator *Iterator) End() {
-    iterator.index = iterator.list.size
-}
-
-// First moves the iterator to the first element and returns true if there was a first element in the container.
-// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) First() bool {
-    iterator.Begin()
-    return iterator.Next()
-}
-
-// Last moves the iterator to the last element and returns true if there was a last element in the container.
-// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
-// Modifies the state of the iterator.
-func (iterator *Iterator) Last() bool {
-    iterator.End()
-    return iterator.Prev()
-}
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
deleted file mode 100644
index 2f283fb97d9..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package arraylist
-
-import (
-    "encoding/json"
-    "github.com/emirpasic/gods/containers"
-)
-
-func assertSerializationImplementation() {
-    var _ containers.JSONSerializer = (*List)(nil)
-    var _ containers.JSONDeserializer = (*List)(nil)
-}
-
-// ToJSON outputs the JSON representation of list's elements.
-func (list *List) ToJSON() ([]byte, error) {
-    return json.Marshal(list.elements[:list.size])
-}
-
-// FromJSON populates list's elements from the input JSON representation.
-func (list *List) FromJSON(data []byte) error {
-    err := json.Unmarshal(data, &list.elements)
-    if err == nil {
-        list.size = len(list.elements)
-    }
-    return err
-}
diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/lists/lists.go b/ui/wasm/vendor/github.com/emirpasic/gods/lists/lists.go
deleted file mode 100644
index 1f6bb08e945..00000000000
--- a/ui/wasm/vendor/github.com/emirpasic/gods/lists/lists.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2015, Emir Pasic. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package lists provides an abstract List interface.
-//
-// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item.
-//
-// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
-package lists
-
-import (
-    "github.com/emirpasic/gods/containers"
-    "github.com/emirpasic/gods/utils"
-)
-
-// List interface that all lists implement
-type List interface {
-    Get(index int) (interface{}, bool)
-    Remove(index int)
-    Add(values ...interface{})
-    Contains(values ...interface{}) bool
-    Sort(comparator utils.Comparator)
-    Swap(index1, index2 int)
-    Insert(index int, values ...interface{})
-    Set(index int, value interface{})
-
-    containers.Container
-    // Empty() bool
-    // Size() int
-    // Clear()
-    // Values() []interface{}
-}
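The iterator deleted above supports forward traversal and, via the reverse half of the interface, backward traversal; a sketch:

    it := list.Iterator()
    for it.Next() { // forward: indexes 0..size-1
        fmt.Println(it.Index(), it.Value())
    }
    for it.End(); it.Prev(); { // reverse: indexes size-1..0
        fmt.Println(it.Index(), it.Value())
    }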
-// -// References: http://en.wikipedia.org/wiki/Binary_heap -package binaryheap - -import ( - "fmt" - "github.com/emirpasic/gods/lists/arraylist" - "github.com/emirpasic/gods/trees" - "github.com/emirpasic/gods/utils" - "strings" -) - -func assertTreeImplementation() { - var _ trees.Tree = (*Heap)(nil) -} - -// Heap holds elements in an array-list -type Heap struct { - list *arraylist.List - Comparator utils.Comparator -} - -// NewWith instantiates a new empty heap tree with the custom comparator. -func NewWith(comparator utils.Comparator) *Heap { - return &Heap{list: arraylist.New(), Comparator: comparator} -} - -// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int. -func NewWithIntComparator() *Heap { - return &Heap{list: arraylist.New(), Comparator: utils.IntComparator} -} - -// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string. -func NewWithStringComparator() *Heap { - return &Heap{list: arraylist.New(), Comparator: utils.StringComparator} -} - -// Push adds a value onto the heap and bubbles it up accordingly. -func (heap *Heap) Push(values ...interface{}) { - if len(values) == 1 { - heap.list.Add(values[0]) - heap.bubbleUp() - } else { - // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap - for _, value := range values { - heap.list.Add(value) - } - size := heap.list.Size()/2 + 1 - for i := size; i >= 0; i-- { - heap.bubbleDownIndex(i) - } - } -} - -// Pop removes top element on heap and returns it, or nil if heap is empty. -// Second return parameter is true, unless the heap was empty and there was nothing to pop. -func (heap *Heap) Pop() (value interface{}, ok bool) { - value, ok = heap.list.Get(0) - if !ok { - return - } - lastIndex := heap.list.Size() - 1 - heap.list.Swap(0, lastIndex) - heap.list.Remove(lastIndex) - heap.bubbleDown() - return -} - -// Peek returns top element on the heap without removing it, or nil if heap is empty. -// Second return parameter is true, unless the heap was empty and there was nothing to peek. -func (heap *Heap) Peek() (value interface{}, ok bool) { - return heap.list.Get(0) -} - -// Empty returns true if heap does not contain any elements. -func (heap *Heap) Empty() bool { - return heap.list.Empty() -} - -// Size returns number of elements within the heap. -func (heap *Heap) Size() int { - return heap.list.Size() -} - -// Clear removes all elements from the heap. -func (heap *Heap) Clear() { - heap.list.Clear() -} - -// Values returns all elements in the heap. -func (heap *Heap) Values() []interface{} { - return heap.list.Values() -} - -// String returns a string representation of container -func (heap *Heap) String() string { - str := "BinaryHeap\n" - values := []string{} - for _, value := range heap.list.Values() { - values = append(values, fmt.Sprintf("%v", value)) - } - str += strings.Join(values, ", ") - return str -} - -// Performs the "bubble down" operation. This is to place the element that is at the root -// of the heap in its correct place so that the heap maintains the min/max-heap order property. -func (heap *Heap) bubbleDown() { - heap.bubbleDownIndex(0) -} - -// Performs the "bubble down" operation. This is to place the element that is at the index -// of the heap in its correct place so that the heap maintains the min/max-heap order property. 
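For reference while reading the bubble-up/bubble-down helpers below: the comparator alone decides whether this is a min- or max-heap. A short sketch against the gods v1 binaryheap API as vendored (values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/binaryheap"
	"github.com/emirpasic/gods/utils"
)

func main() {
	// IntComparator yields a min-heap: Pop returns the smallest value first.
	min := binaryheap.NewWithIntComparator()
	min.Push(3, 1, 2) // the multi-value Push takes the O(n) heapify branch
	v, _ := min.Pop()
	fmt.Println(v) // 1

	// Inverting the comparator turns the same structure into a max-heap.
	max := binaryheap.NewWith(func(a, b interface{}) int {
		return -utils.IntComparator(a, b)
	})
	max.Push(3, 1, 2)
	v, _ = max.Pop()
	fmt.Println(v) // 3
}
```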
-func (heap *Heap) bubbleDownIndex(index int) { - size := heap.list.Size() - for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 { - rightIndex := index<<1 + 2 - smallerIndex := leftIndex - leftValue, _ := heap.list.Get(leftIndex) - rightValue, _ := heap.list.Get(rightIndex) - if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 { - smallerIndex = rightIndex - } - indexValue, _ := heap.list.Get(index) - smallerValue, _ := heap.list.Get(smallerIndex) - if heap.Comparator(indexValue, smallerValue) > 0 { - heap.list.Swap(index, smallerIndex) - } else { - break - } - index = smallerIndex - } -} - -// Performs the "bubble up" operation. This is to place a newly inserted -// element (i.e. last element in the list) in its correct place so that -// the heap maintains the min/max-heap order property. -func (heap *Heap) bubbleUp() { - index := heap.list.Size() - 1 - for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 { - indexValue, _ := heap.list.Get(index) - parentValue, _ := heap.list.Get(parentIndex) - if heap.Comparator(parentValue, indexValue) <= 0 { - break - } - heap.list.Swap(index, parentIndex) - index = parentIndex - } -} - -// Check that the index is within bounds of the list -func (heap *Heap) withinRange(index int) bool { - return index >= 0 && index < heap.list.Size() -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go deleted file mode 100644 index beeb8d70136..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package binaryheap - -import "github.com/emirpasic/gods/containers" - -func assertIteratorImplementation() { - var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) -} - -// Iterator returns a stateful iterator whose values can be fetched by an index. -type Iterator struct { - heap *Heap - index int -} - -// Iterator returns a stateful iterator whose values can be fetched by an index. -func (heap *Heap) Iterator() Iterator { - return Iterator{heap: heap, index: -1} -} - -// Next moves the iterator to the next element and returns true if there was a next element in the container. -// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). -// If Next() was called for the first time, then it will point the iterator to the first element if it exists. -// Modifies the state of the iterator. -func (iterator *Iterator) Next() bool { - if iterator.index < iterator.heap.Size() { - iterator.index++ - } - return iterator.heap.withinRange(iterator.index) -} - -// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. -// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Prev() bool { - if iterator.index >= 0 { - iterator.index-- - } - return iterator.heap.withinRange(iterator.index) -} - -// Value returns the current element's value. -// Does not modify the state of the iterator. -func (iterator *Iterator) Value() interface{} { - value, _ := iterator.heap.list.Get(iterator.index) - return value -} - -// Index returns the current element's index. 
-// Does not modify the state of the iterator. -func (iterator *Iterator) Index() int { - return iterator.index -} - -// Begin resets the iterator to its initial state (one-before-first) -// Call Next() to fetch the first element if any. -func (iterator *Iterator) Begin() { - iterator.index = -1 -} - -// End moves the iterator past the last element (one-past-the-end). -// Call Prev() to fetch the last element if any. -func (iterator *Iterator) End() { - iterator.index = iterator.heap.Size() -} - -// First moves the iterator to the first element and returns true if there was a first element in the container. -// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) First() bool { - iterator.Begin() - return iterator.Next() -} - -// Last moves the iterator to the last element and returns true if there was a last element in the container. -// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). -// Modifies the state of the iterator. -func (iterator *Iterator) Last() bool { - iterator.End() - return iterator.Prev() -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go b/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go deleted file mode 100644 index 00d0c7719cd..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package binaryheap - -import "github.com/emirpasic/gods/containers" - -func assertSerializationImplementation() { - var _ containers.JSONSerializer = (*Heap)(nil) - var _ containers.JSONDeserializer = (*Heap)(nil) -} - -// ToJSON outputs the JSON representation of the heap. -func (heap *Heap) ToJSON() ([]byte, error) { - return heap.list.ToJSON() -} - -// FromJSON populates the heap from the input JSON representation. -func (heap *Heap) FromJSON(data []byte) error { - return heap.list.FromJSON(data) -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/trees/trees.go b/ui/wasm/vendor/github.com/emirpasic/gods/trees/trees.go deleted file mode 100644 index a5a7427d342..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/trees/trees.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package trees provides an abstract Tree interface. -// -// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes. 
-// -// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29 -package trees - -import "github.com/emirpasic/gods/containers" - -// Tree interface that all trees implement -type Tree interface { - containers.Container - // Empty() bool - // Size() int - // Clear() - // Values() []interface{} -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/utils/comparator.go b/ui/wasm/vendor/github.com/emirpasic/gods/utils/comparator.go deleted file mode 100644 index 6a9afbf3466..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/utils/comparator.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "time" - -// Comparator will make type assertion (see IntComparator for example), -// which will panic if a or b are not of the asserted type. -// -// Should return a number: -// negative , if a < b -// zero , if a == b -// positive , if a > b -type Comparator func(a, b interface{}) int - -// StringComparator provides a fast comparison on strings -func StringComparator(a, b interface{}) int { - s1 := a.(string) - s2 := b.(string) - min := len(s2) - if len(s1) < len(s2) { - min = len(s1) - } - diff := 0 - for i := 0; i < min && diff == 0; i++ { - diff = int(s1[i]) - int(s2[i]) - } - if diff == 0 { - diff = len(s1) - len(s2) - } - if diff < 0 { - return -1 - } - if diff > 0 { - return 1 - } - return 0 -} - -// IntComparator provides a basic comparison on int -func IntComparator(a, b interface{}) int { - aAsserted := a.(int) - bAsserted := b.(int) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int8Comparator provides a basic comparison on int8 -func Int8Comparator(a, b interface{}) int { - aAsserted := a.(int8) - bAsserted := b.(int8) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int16Comparator provides a basic comparison on int16 -func Int16Comparator(a, b interface{}) int { - aAsserted := a.(int16) - bAsserted := b.(int16) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int32Comparator provides a basic comparison on int32 -func Int32Comparator(a, b interface{}) int { - aAsserted := a.(int32) - bAsserted := b.(int32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Int64Comparator provides a basic comparison on int64 -func Int64Comparator(a, b interface{}) int { - aAsserted := a.(int64) - bAsserted := b.(int64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UIntComparator provides a basic comparison on uint -func UIntComparator(a, b interface{}) int { - aAsserted := a.(uint) - bAsserted := b.(uint) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt8Comparator provides a basic comparison on uint8 -func UInt8Comparator(a, b interface{}) int { - aAsserted := a.(uint8) - bAsserted := b.(uint8) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt16Comparator provides a basic comparison on uint16 -func UInt16Comparator(a, b interface{}) int { - aAsserted := 
a.(uint16) - bAsserted := b.(uint16) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt32Comparator provides a basic comparison on uint32 -func UInt32Comparator(a, b interface{}) int { - aAsserted := a.(uint32) - bAsserted := b.(uint32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// UInt64Comparator provides a basic comparison on uint64 -func UInt64Comparator(a, b interface{}) int { - aAsserted := a.(uint64) - bAsserted := b.(uint64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Float32Comparator provides a basic comparison on float32 -func Float32Comparator(a, b interface{}) int { - aAsserted := a.(float32) - bAsserted := b.(float32) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// Float64Comparator provides a basic comparison on float64 -func Float64Comparator(a, b interface{}) int { - aAsserted := a.(float64) - bAsserted := b.(float64) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// ByteComparator provides a basic comparison on byte -func ByteComparator(a, b interface{}) int { - aAsserted := a.(byte) - bAsserted := b.(byte) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// RuneComparator provides a basic comparison on rune -func RuneComparator(a, b interface{}) int { - aAsserted := a.(rune) - bAsserted := b.(rune) - switch { - case aAsserted > bAsserted: - return 1 - case aAsserted < bAsserted: - return -1 - default: - return 0 - } -} - -// TimeComparator provides a basic comparison on time.Time -func TimeComparator(a, b interface{}) int { - aAsserted := a.(time.Time) - bAsserted := b.(time.Time) - - switch { - case aAsserted.After(bAsserted): - return 1 - case aAsserted.Before(bAsserted): - return -1 - default: - return 0 - } -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/utils/sort.go b/ui/wasm/vendor/github.com/emirpasic/gods/utils/sort.go deleted file mode 100644 index 79ced1f5d26..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/utils/sort.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package utils - -import "sort" - -// Sort sorts values (in-place) with respect to the given comparator. -// -// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices). -func Sort(values []interface{}, comparator Comparator) { - sort.Sort(sortable{values, comparator}) -} - -type sortable struct { - values []interface{} - comparator Comparator -} - -func (s sortable) Len() int { - return len(s.values) -} -func (s sortable) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] -} -func (s sortable) Less(i, j int) bool { - return s.comparator(s.values[i], s.values[j]) < 0 -} diff --git a/ui/wasm/vendor/github.com/emirpasic/gods/utils/utils.go b/ui/wasm/vendor/github.com/emirpasic/gods/utils/utils.go deleted file mode 100644 index 1ad49cbc072..00000000000 --- a/ui/wasm/vendor/github.com/emirpasic/gods/utils/utils.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2015, Emir Pasic. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package utils provides common utility functions. -// -// Provided functionalities: -// - sorting -// - comparators -package utils - -import ( - "fmt" - "strconv" -) - -// ToString converts a value to string. -func ToString(value interface{}) string { - switch value.(type) { - case string: - return value.(string) - case int8: - return strconv.FormatInt(int64(value.(int8)), 10) - case int16: - return strconv.FormatInt(int64(value.(int16)), 10) - case int32: - return strconv.FormatInt(int64(value.(int32)), 10) - case int64: - return strconv.FormatInt(int64(value.(int64)), 10) - case uint8: - return strconv.FormatUint(uint64(value.(uint8)), 10) - case uint16: - return strconv.FormatUint(uint64(value.(uint16)), 10) - case uint32: - return strconv.FormatUint(uint64(value.(uint32)), 10) - case uint64: - return strconv.FormatUint(uint64(value.(uint64)), 10) - case float32: - return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64) - case float64: - return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64) - case bool: - return strconv.FormatBool(value.(bool)) - default: - return fmt.Sprintf("%+v", value) - } -} diff --git a/ui/wasm/vendor/github.com/evanphx/json-patch/LICENSE b/ui/wasm/vendor/github.com/evanphx/json-patch/LICENSE deleted file mode 100644 index df76d7d7716..00000000000 --- a/ui/wasm/vendor/github.com/evanphx/json-patch/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2014, Evan Phoenix -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. -* Neither the name of the Evan Phoenix nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
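The `Comparator` contract and `utils.Sort` deleted just above are easiest to see together. A minimal sketch against the same gods v1 API; the `person` type and values are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

type person struct {
	name string
	age  int
}

// byAge follows the Comparator contract: negative if a < b, zero if
// equal, positive if a > b. Like IntComparator, it panics when the
// values are not of the asserted type.
func byAge(a, b interface{}) int {
	return utils.IntComparator(a.(*person).age, b.(*person).age)
}

func main() {
	people := []interface{}{
		&person{"carol", 41},
		&person{"alice", 23},
		&person{"bob", 34},
	}
	utils.Sort(people, byAge) // in-place, backed by sort.Sort
	for _, p := range people {
		fmt.Println(p.(*person).name) // alice, bob, carol
	}
}
```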
diff --git a/ui/wasm/vendor/github.com/evanphx/json-patch/README.md b/ui/wasm/vendor/github.com/evanphx/json-patch/README.md deleted file mode 100644 index 28e35169375..00000000000 --- a/ui/wasm/vendor/github.com/evanphx/json-patch/README.md +++ /dev/null @@ -1,317 +0,0 @@ -# JSON-Patch -`jsonpatch` is a library which provides functionality for both applying -[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as -well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396). - -[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch) -[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch) -[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch) - -# Get It! - -**Latest and greatest**: -```bash -go get -u github.com/evanphx/json-patch/v5 -``` - -**Stable Versions**: -* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5` -* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4` - -(previous versions below `v3` are unavailable) - -# Use It! -* [Create and apply a merge patch](#create-and-apply-a-merge-patch) -* [Create and apply a JSON Patch](#create-and-apply-a-json-patch) -* [Comparing JSON documents](#comparing-json-documents) -* [Combine merge patches](#combine-merge-patches) - - -# Configuration - -* There is a global configuration variable `jsonpatch.SupportNegativeIndices`. - This defaults to `true` and enables the non-standard practice of allowing - negative indices to mean indices starting at the end of an array. This - functionality can be disabled by setting `jsonpatch.SupportNegativeIndices = - false`. - -* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`, - which limits the total size increase in bytes caused by "copy" operations in a - patch. It defaults to 0, which means there is no limit. - -These global variables control the behavior of `jsonpatch.Apply`. - -An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior -is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. - -Structure `jsonpatch.ApplyOptions` includes the configuration options above -and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. - -When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore -`remove` operations whose `path` points to a non-existent location in the JSON document. -`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` -returning an error when hitting a missing `path` on `remove`. - -When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure -that `add` operations produce all the `path` elements that are missing from the target object. - -Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` -whose values are populated from the global configuration variables. - -## Create and apply a merge patch -Given both an original JSON document and a modified JSON document, you can create -a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. - -It can describe the changes needed to convert from the original to the -modified JSON document. - -Once you have a merge patch, you can apply it to other JSON documents using the -`jsonpatch.MergePatch(document, patch)` function. 
- -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - // Let's create a merge patch from these two documents... - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - target := []byte(`{"name": "Jane", "age": 24}`) - - patch, err := jsonpatch.CreateMergePatch(original, target) - if err != nil { - panic(err) - } - - // Now let's apply the patch against a different JSON document... - - alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`) - modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch) - - fmt.Printf("patch document: %s\n", patch) - fmt.Printf("updated alternative doc: %s\n", modifiedAlternative) -} -``` - -When run, you get the following output: - -```bash -$ go run main.go -patch document: {"height":null,"name":"Jane"} -updated alternative doc: {"age":28,"name":"Jane"} -``` - -## Create and apply a JSON Patch -You can create patch objects using `DecodePatch([]byte)`, which can then -be applied against JSON documents. - -The following is an example of creating a patch from two operations, and -applying it against a JSON document. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - patchJSON := []byte(`[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} - ]`) - - patch, err := jsonpatch.DecodePatch(patchJSON) - if err != nil { - panic(err) - } - - modified, err := patch.Apply(original) - if err != nil { - panic(err) - } - - fmt.Printf("Original document: %s\n", original) - fmt.Printf("Modified document: %s\n", modified) -} -``` - -When run, you get the following output: - -```bash -$ go run main.go -Original document: {"name": "John", "age": 24, "height": 3.21} -Modified document: {"age":24,"name":"Jane"} -``` - -## Comparing JSON documents -Due to potential whitespace and ordering differences, one cannot simply compare -JSON strings or byte-arrays directly. - -As such, you can instead use `jsonpatch.Equal(document1, document2)` to -determine if two JSON documents are _structurally_ equal. This ignores -whitespace differences, and key-value ordering. - -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - similar := []byte(` - { - "age": 24, - "height": 3.21, - "name": "John" - } - `) - different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`) - - if jsonpatch.Equal(original, similar) { - fmt.Println(`"original" is structurally equal to "similar"`) - } - - if !jsonpatch.Equal(original, different) { - fmt.Println(`"original" is _not_ structurally equal to "different"`) - } -} -``` - -When run, you get the following output: -```bash -$ go run main.go -"original" is structurally equal to "similar" -"original" is _not_ structurally equal to "different" -``` - -## Combine merge patches -Given two JSON merge patch documents, it is possible to combine them into a -single merge patch which can describe both sets of changes. - -The resulting merge patch can be used such that applying it results in a -document structurally similar to merging each merge patch to the document -in succession. 
- -```go -package main - -import ( - "fmt" - - jsonpatch "github.com/evanphx/json-patch" -) - -func main() { - original := []byte(`{"name": "John", "age": 24, "height": 3.21}`) - - nameAndHeight := []byte(`{"height":null,"name":"Jane"}`) - ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`) - - // Let's combine these merge patch documents... - combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply each patch individually against the original document - withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight) - if err != nil { - panic(err) - } - - withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes) - if err != nil { - panic(err) - } - - // Apply the combined patch against the original document - - withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch) - if err != nil { - panic(err) - } - - // Do both result in the same thing? They should! - if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) { - fmt.Println("Both JSON documents are structurally the same!") - } - - fmt.Printf("combined merge patch: %s", combinedPatch) -} -``` - -When run, you get the following output: -```bash -$ go run main.go -Both JSON documents are structurally the same! -combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"} -``` - -# CLI for comparing JSON documents -You can install the commandline program `json-patch`. - -This program can take multiple JSON patch documents as arguments, -and be fed a JSON document from `stdin`. It will apply the patch(es) against -the document and output the modified doc. - -**patch.1.json** -```json -[ - {"op": "replace", "path": "/name", "value": "Jane"}, - {"op": "remove", "path": "/height"} -] -``` - -**patch.2.json** -```json -[ - {"op": "add", "path": "/address", "value": "123 Main St"}, - {"op": "replace", "path": "/age", "value": "21"} -] -``` - -**document.json** -```json -{ - "name": "John", - "age": 24, - "height": 3.21 -} -``` - -You can then run: - -```bash -$ go install github.com/evanphx/json-patch/cmd/json-patch -$ cat document.json | json-patch -p patch.1.json -p patch.2.json -{"address":"123 Main St","age":"21","name":"Jane"} -``` - -# Help It! -Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues) -or [create a PR](https://github.com/evanphx/json-patch/compare). - - -Before creating a pull request, we'd ask that you make sure tests are passing -and that you have added new tests when applicable. - -Contributors can run tests using: - -```bash -go test -cover ./... -``` - -Builds for pull requests are tested automatically -using [TravisCI](https://travis-ci.org/evanphx/json-patch). diff --git a/ui/wasm/vendor/github.com/evanphx/json-patch/errors.go b/ui/wasm/vendor/github.com/evanphx/json-patch/errors.go deleted file mode 100644 index 75304b4437c..00000000000 --- a/ui/wasm/vendor/github.com/evanphx/json-patch/errors.go +++ /dev/null @@ -1,38 +0,0 @@ -package jsonpatch - -import "fmt" - -// AccumulatedCopySizeError is an error type returned when the accumulated size -// increase caused by copy operations in a patch operation has exceeded the -// limit. -type AccumulatedCopySizeError struct { - limit int64 - accumulated int64 -} - -// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. -func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { - return &AccumulatedCopySizeError{limit: l, accumulated: a} -} - -// Error implements the error interface. 
-func (a *AccumulatedCopySizeError) Error() string { - return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) -} - -// ArraySizeError is an error type returned when the array size has exceeded -// the limit. -type ArraySizeError struct { - limit int - size int -} - -// NewArraySizeError returns an ArraySizeError. -func NewArraySizeError(l, s int) *ArraySizeError { - return &ArraySizeError{limit: l, size: s} -} - -// Error implements the error interface. -func (a *ArraySizeError) Error() string { - return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) -} diff --git a/ui/wasm/vendor/github.com/evanphx/json-patch/merge.go b/ui/wasm/vendor/github.com/evanphx/json-patch/merge.go deleted file mode 100644 index ad88d40181c..00000000000 --- a/ui/wasm/vendor/github.com/evanphx/json-patch/merge.go +++ /dev/null @@ -1,389 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" -) - -func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { - curDoc, err := cur.intoDoc() - - if err != nil { - pruneNulls(patch) - return patch - } - - patchDoc, err := patch.intoDoc() - - if err != nil { - return patch - } - - mergeDocs(curDoc, patchDoc, mergeMerge) - - return cur -} - -func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { - for k, v := range *patch { - if v == nil { - if mergeMerge { - (*doc)[k] = nil - } else { - delete(*doc, k) - } - } else { - cur, ok := (*doc)[k] - - if !ok || cur == nil { - if !mergeMerge { - pruneNulls(v) - } - - (*doc)[k] = v - } else { - (*doc)[k] = merge(cur, v, mergeMerge) - } - } - } -} - -func pruneNulls(n *lazyNode) { - sub, err := n.intoDoc() - - if err == nil { - pruneDocNulls(sub) - } else { - ary, err := n.intoAry() - - if err == nil { - pruneAryNulls(ary) - } - } -} - -func pruneDocNulls(doc *partialDoc) *partialDoc { - for k, v := range *doc { - if v == nil { - delete(*doc, k) - } else { - pruneNulls(v) - } - } - - return doc -} - -func pruneAryNulls(ary *partialArray) *partialArray { - newAry := []*lazyNode{} - - for _, v := range *ary { - if v != nil { - pruneNulls(v) - } - newAry = append(newAry, v) - } - - *ary = newAry - - return ary -} - -var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") -var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") - -// MergeMergePatches merges two merge patches together, such that -// applying this resulting merged merge patch to a document yields the same -// as merging each merge patch to the document in succession. -func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { - return doMergePatch(patch1Data, patch2Data, true) -} - -// MergePatch merges the patchData into the docData. 
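`AccumulatedCopySizeError` above only surfaces when the package-level `AccumulatedCopySizeLimit` is non-zero. A hedged sketch of tripping it with a `copy` operation, using only the API present in this vendored copy (document and limit are illustrative):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Cap the total bytes that "copy" operations may add (0 means unlimited).
	jsonpatch.AccumulatedCopySizeLimit = 10

	doc := []byte(`{"blob": "0123456789abcdef"}`)
	patchJSON := []byte(`[{"op": "copy", "from": "/blob", "path": "/dup"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	// Copying /blob adds 18 bytes, exceeding the 10-byte limit.
	if _, err := patch.Apply(doc); err != nil {
		fmt.Println(err) // reports the accumulated size and the limit
	}
}
```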
-func MergePatch(docData, patchData []byte) ([]byte, error) { - return doMergePatch(docData, patchData, false) -} - -func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { - doc := &partialDoc{} - - docErr := json.Unmarshal(docData, doc) - - patch := &partialDoc{} - - patchErr := json.Unmarshal(patchData, patch) - - if _, ok := docErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONDoc - } - - if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, ErrBadJSONPatch - } - - if docErr == nil && *doc == nil { - return nil, ErrBadJSONDoc - } - - if patchErr == nil && *patch == nil { - return nil, ErrBadJSONPatch - } - - if docErr != nil || patchErr != nil { - // Not an error, just not a doc, so we turn straight into the patch - if patchErr == nil { - if mergeMerge { - doc = patch - } else { - doc = pruneDocNulls(patch) - } - } else { - patchAry := &partialArray{} - patchErr = json.Unmarshal(patchData, patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - pruneAryNulls(patchAry) - - out, patchErr := json.Marshal(patchAry) - - if patchErr != nil { - return nil, ErrBadJSONPatch - } - - return out, nil - } - } else { - mergeDocs(doc, patch, mergeMerge) - } - - return json.Marshal(doc) -} - -// resemblesJSONArray indicates whether the byte-slice "appears" to be -// a JSON array or not. -// False-positives are possible, as this function does not check the internal -// structure of the array. It only checks that the outer syntax is present and -// correct. -func resemblesJSONArray(input []byte) bool { - input = bytes.TrimSpace(input) - - hasPrefix := bytes.HasPrefix(input, []byte("[")) - hasSuffix := bytes.HasSuffix(input, []byte("]")) - - return hasPrefix && hasSuffix -} - -// CreateMergePatch will return a merge patch document capable of converting -// the original document(s) to the modified document(s). -// The parameters can be bytes of either two JSON Documents, or two arrays of -// JSON documents. -// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 -func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalResemblesArray := resemblesJSONArray(originalJSON) - modifiedResemblesArray := resemblesJSONArray(modifiedJSON) - - // Do both byte-slices seem like JSON arrays? - if originalResemblesArray && modifiedResemblesArray { - return createArrayMergePatch(originalJSON, modifiedJSON) - } - - // Are both byte-slices not arrays? Then they are likely JSON objects... - if !originalResemblesArray && !modifiedResemblesArray { - return createObjectMergePatch(originalJSON, modifiedJSON) - } - - // None of the above? Then return an error because of mismatched types. - return nil, errBadMergeTypes -} - -// createObjectMergePatch will return a merge-patch document capable of -// converting the original document to the modified document. 
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDoc := map[string]interface{}{} - modifiedDoc := map[string]interface{}{} - - err := json.Unmarshal(originalJSON, &originalDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDoc) - if err != nil { - return nil, ErrBadJSONDoc - } - - dest, err := getDiff(originalDoc, modifiedDoc) - if err != nil { - return nil, err - } - - return json.Marshal(dest) -} - -// createArrayMergePatch will return an array of merge-patch documents capable -// of converting the original document to the modified document for each -// pair of JSON documents provided in the arrays. -// Arrays of mismatched sizes will result in an error. -func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { - originalDocs := []json.RawMessage{} - modifiedDocs := []json.RawMessage{} - - err := json.Unmarshal(originalJSON, &originalDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - err = json.Unmarshal(modifiedJSON, &modifiedDocs) - if err != nil { - return nil, ErrBadJSONDoc - } - - total := len(originalDocs) - if len(modifiedDocs) != total { - return nil, ErrBadJSONDoc - } - - result := []json.RawMessage{} - for i := 0; i < len(originalDocs); i++ { - original := originalDocs[i] - modified := modifiedDocs[i] - - patch, err := createObjectMergePatch(original, modified) - if err != nil { - return nil, err - } - - result = append(result, json.RawMessage(patch)) - } - - return json.Marshal(result) -} - -// Returns true if the array matches (must be json types). -// As is idiomatic for Go, an empty array is not the same as a nil array. -func matchesArray(a, b []interface{}) bool { - if len(a) != len(b) { - return false - } - if (a == nil && b != nil) || (a != nil && b == nil) { - return false - } - for i := range a { - if !matchesValue(a[i], b[i]) { - return false - } - } - return true -} - -// Returns true if the values match (must be json types) -// The types of the values must match, otherwise it will always return false -// If two map[string]interface{} are given, all elements must match. -func matchesValue(av, bv interface{}) bool { - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - return false - } - switch at := av.(type) { - case string: - bt := bv.(string) - if bt == at { - return true - } - case float64: - bt := bv.(float64) - if bt == at { - return true - } - case bool: - bt := bv.(bool) - if bt == at { - return true - } - case nil: - // Both nil, fine. - return true - case map[string]interface{}: - bt := bv.(map[string]interface{}) - if len(bt) != len(at) { - return false - } - for key := range bt { - av, aOK := at[key] - bv, bOK := bt[key] - if aOK != bOK { - return false - } - if !matchesValue(av, bv) { - return false - } - } - return true - case []interface{}: - bt := bv.([]interface{}) - return matchesArray(at, bt) - } - return false -} - -// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { - into := map[string]interface{}{} - for key, bv := range b { - av, ok := a[key] - // value was added - if !ok { - into[key] = bv - continue - } - // If types have changed, replace completely - if reflect.TypeOf(av) != reflect.TypeOf(bv) { - into[key] = bv - continue - } - // Types are the same, compare values - switch at := av.(type) { - case map[string]interface{}: - bt := bv.(map[string]interface{}) - dst := make(map[string]interface{}, len(bt)) - dst, err := getDiff(at, bt) - if err != nil { - return nil, err - } - if len(dst) > 0 { - into[key] = dst - } - case string, float64, bool: - if !matchesValue(av, bv) { - into[key] = bv - } - case []interface{}: - bt := bv.([]interface{}) - if !matchesArray(at, bt) { - into[key] = bv - } - case nil: - switch bv.(type) { - case nil: - // Both nil, fine. - default: - into[key] = bv - } - default: - panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) - } - } - // Now add all deleted values as nil - for key := range a { - _, found := b[key] - if !found { - into[key] = nil - } - } - return into, nil -} diff --git a/ui/wasm/vendor/github.com/evanphx/json-patch/patch.go b/ui/wasm/vendor/github.com/evanphx/json-patch/patch.go deleted file mode 100644 index 18298549076..00000000000 --- a/ui/wasm/vendor/github.com/evanphx/json-patch/patch.go +++ /dev/null @@ -1,788 +0,0 @@ -package jsonpatch - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "strings" - - "github.com/pkg/errors" -) - -const ( - eRaw = iota - eDoc - eAry -) - -var ( - // SupportNegativeIndices decides whether to support non-standard practice of - // allowing negative indices to mean indices starting at the end of an array. - // Default to true. - SupportNegativeIndices bool = true - // AccumulatedCopySizeLimit limits the total size increase in bytes caused by - // "copy" operations in a patch. - AccumulatedCopySizeLimit int64 = 0 -) - -var ( - ErrTestFailed = errors.New("test failed") - ErrMissing = errors.New("missing value") - ErrUnknownType = errors.New("unknown object type") - ErrInvalid = errors.New("invalid state detected") - ErrInvalidIndex = errors.New("invalid index referenced") -) - -type lazyNode struct { - raw *json.RawMessage - doc partialDoc - ary partialArray - which int -} - -// Operation is a single JSON-Patch step, such as a single 'add' operation. -type Operation map[string]*json.RawMessage - -// Patch is an ordered collection of Operations. 
-type Patch []Operation - -type partialDoc map[string]*lazyNode -type partialArray []*lazyNode - -type container interface { - get(key string) (*lazyNode, error) - set(key string, val *lazyNode) error - add(key string, val *lazyNode) error - remove(key string) error -} - -func newLazyNode(raw *json.RawMessage) *lazyNode { - return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} -} - -func (n *lazyNode) MarshalJSON() ([]byte, error) { - switch n.which { - case eRaw: - return json.Marshal(n.raw) - case eDoc: - return json.Marshal(n.doc) - case eAry: - return json.Marshal(n.ary) - default: - return nil, ErrUnknownType - } -} - -func (n *lazyNode) UnmarshalJSON(data []byte) error { - dest := make(json.RawMessage, len(data)) - copy(dest, data) - n.raw = &dest - n.which = eRaw - return nil -} - -func deepCopy(src *lazyNode) (*lazyNode, int, error) { - if src == nil { - return nil, 0, nil - } - a, err := src.MarshalJSON() - if err != nil { - return nil, 0, err - } - sz := len(a) - ra := make(json.RawMessage, sz) - copy(ra, a) - return newLazyNode(&ra), sz, nil -} - -func (n *lazyNode) intoDoc() (*partialDoc, error) { - if n.which == eDoc { - return &n.doc, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return nil, err - } - - n.which = eDoc - return &n.doc, nil -} - -func (n *lazyNode) intoAry() (*partialArray, error) { - if n.which == eAry { - return &n.ary, nil - } - - if n.raw == nil { - return nil, ErrInvalid - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return nil, err - } - - n.which = eAry - return &n.ary, nil -} - -func (n *lazyNode) compact() []byte { - buf := &bytes.Buffer{} - - if n.raw == nil { - return nil - } - - err := json.Compact(buf, *n.raw) - - if err != nil { - return *n.raw - } - - return buf.Bytes() -} - -func (n *lazyNode) tryDoc() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.doc) - - if err != nil { - return false - } - - n.which = eDoc - return true -} - -func (n *lazyNode) tryAry() bool { - if n.raw == nil { - return false - } - - err := json.Unmarshal(*n.raw, &n.ary) - - if err != nil { - return false - } - - n.which = eAry - return true -} - -func (n *lazyNode) equal(o *lazyNode) bool { - if n.which == eRaw { - if !n.tryDoc() && !n.tryAry() { - if o.which != eRaw { - return false - } - - return bytes.Equal(n.compact(), o.compact()) - } - } - - if n.which == eDoc { - if o.which == eRaw { - if !o.tryDoc() { - return false - } - } - - if o.which != eDoc { - return false - } - - if len(n.doc) != len(o.doc) { - return false - } - - for k, v := range n.doc { - ov, ok := o.doc[k] - - if !ok { - return false - } - - if (v == nil) != (ov == nil) { - return false - } - - if v == nil && ov == nil { - continue - } - - if !v.equal(ov) { - return false - } - } - - return true - } - - if o.which != eAry && !o.tryAry() { - return false - } - - if len(n.ary) != len(o.ary) { - return false - } - - for idx, val := range n.ary { - if !val.equal(o.ary[idx]) { - return false - } - } - - return true -} - -// Kind reads the "op" field of the Operation. -func (o Operation) Kind() string { - if obj, ok := o["op"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown" - } - - return op - } - - return "unknown" -} - -// Path reads the "path" field of the Operation. 
-func (o Operation) Path() (string, error) { - if obj, ok := o["path"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") -} - -// From reads the "from" field of the Operation. -func (o Operation) From() (string, error) { - if obj, ok := o["from"]; ok && obj != nil { - var op string - - err := json.Unmarshal(*obj, &op) - - if err != nil { - return "unknown", err - } - - return op, nil - } - - return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field") -} - -func (o Operation) value() *lazyNode { - if obj, ok := o["value"]; ok { - return newLazyNode(obj) - } - - return nil -} - -// ValueInterface decodes the operation value into an interface. -func (o Operation) ValueInterface() (interface{}, error) { - if obj, ok := o["value"]; ok && obj != nil { - var v interface{} - - err := json.Unmarshal(*obj, &v) - - if err != nil { - return nil, err - } - - return v, nil - } - - return nil, errors.Wrapf(ErrMissing, "operation, missing value field") -} - -func isArray(buf []byte) bool { -Loop: - for _, c := range buf { - switch c { - case ' ': - case '\n': - case '\t': - continue - case '[': - return true - default: - break Loop - } - } - - return false -} - -func findObject(pd *container, path string) (container, string) { - doc := *pd - - split := strings.Split(path, "/") - - if len(split) < 2 { - return nil, "" - } - - parts := split[1 : len(split)-1] - - key := split[len(split)-1] - - var err error - - for _, part := range parts { - - next, ok := doc.get(decodePatchKey(part)) - - if next == nil || ok != nil { - return nil, "" - } - - if isArray(*next.raw) { - doc, err = next.intoAry() - - if err != nil { - return nil, "" - } - } else { - doc, err = next.intoDoc() - - if err != nil { - return nil, "" - } - } - } - - return doc, decodePatchKey(key) -} - -func (d *partialDoc) set(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) add(key string, val *lazyNode) error { - (*d)[key] = val - return nil -} - -func (d *partialDoc) get(key string) (*lazyNode, error) { - return (*d)[key], nil -} - -func (d *partialDoc) remove(key string) error { - _, ok := (*d)[key] - if !ok { - return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key) - } - - delete(*d, key) - return nil -} - -// set should only be used to implement the "replace" operation, so "key" must -// be an already existing index in "d". 
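`findObject` above decodes every reference token with `decodePatchKey` (defined at the end of this file), so keys containing `/` or `~` must be escaped as `~1` and `~0` in patch paths. A small sketch, assuming the public API of this vendored copy:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Per RFC 6901, "/" in a key is written as "~1" and "~" as "~0".
	doc := []byte(`{"a/b": 1, "m~n": 2}`)
	patchJSON := []byte(`[
		{"op": "replace", "path": "/a~1b", "value": 10},
		{"op": "remove", "path": "/m~0n"}
	]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // {"a/b":10}
}
```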
-func (d *partialArray) set(key string, val *lazyNode) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - (*d)[idx] = val - return nil -} - -func (d *partialArray) add(key string, val *lazyNode) error { - if key == "-" { - *d = append(*d, val) - return nil - } - - idx, err := strconv.Atoi(key) - if err != nil { - return errors.Wrapf(err, "value was not a proper array index: '%s'", key) - } - - sz := len(*d) + 1 - - ary := make([]*lazyNode, sz) - - cur := *d - - if idx >= len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(ary) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(ary) - } - - copy(ary[0:idx], cur[0:idx]) - ary[idx] = val - copy(ary[idx+1:], cur[idx:]) - - *d = ary - return nil -} - -func (d *partialArray) get(key string) (*lazyNode, error) { - idx, err := strconv.Atoi(key) - - if err != nil { - return nil, err - } - - if idx >= len(*d) { - return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - return (*d)[idx], nil -} - -func (d *partialArray) remove(key string) error { - idx, err := strconv.Atoi(key) - if err != nil { - return err - } - - cur := *d - - if idx >= len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - - if idx < 0 { - if !SupportNegativeIndices { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - if idx < -len(cur) { - return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) - } - idx += len(cur) - } - - ary := make([]*lazyNode, len(cur)-1) - - copy(ary[0:idx], cur[0:idx]) - copy(ary[idx:], cur[idx+1:]) - - *d = ary - return nil - -} - -func (p Patch) add(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "add operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.add(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in add for path: '%s'", path) - } - - return nil -} - -func (p Patch) remove(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "remove operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) replace(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "replace operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) - } - - _, ok := con.get(key) - if ok != nil { - return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path) - } - - err = con.set(key, op.value()) - if err != nil { - return errors.Wrapf(err, "error in remove for path: '%s'", path) - } - - return nil -} - -func (p Patch) move(doc *container, op Operation) 
error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - err = con.remove(key) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", key) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "move operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) - } - - err = con.add(key, val) - if err != nil { - return errors.Wrapf(err, "error in move for path: '%s'", path) - } - - return nil -} - -func (p Patch) test(doc *container, op Operation) error { - path, err := op.Path() - if err != nil { - return errors.Wrapf(err, "test operation failed to decode path") - } - - con, key := findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in test for path: '%s'", path) - } - - if val == nil { - if op.value().raw == nil { - return nil - } - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } else if op.value() == nil { - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) - } - - if val.equal(op.value()) { - return nil - } - - return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) -} - -func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error { - from, err := op.From() - if err != nil { - return errors.Wrapf(err, "copy operation failed to decode from") - } - - con, key := findObject(doc, from) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) - } - - val, err := con.get(key) - if err != nil { - return errors.Wrapf(err, "error in copy for from: '%s'", from) - } - - path, err := op.Path() - if err != nil { - return errors.Wrapf(ErrMissing, "copy operation failed to decode path") - } - - con, key = findObject(doc, path) - - if con == nil { - return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) - } - - valCopy, sz, err := deepCopy(val) - if err != nil { - return errors.Wrapf(err, "error while performing deep copy") - } - - (*accumulatedCopySize) += int64(sz) - if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit { - return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize) - } - - err = con.add(key, valCopy) - if err != nil { - return errors.Wrapf(err, "error while adding value during copy") - } - - return nil -} - -// Equal indicates if 2 JSON documents have the same structural equality. -func Equal(a, b []byte) bool { - ra := make(json.RawMessage, len(a)) - copy(ra, a) - la := newLazyNode(&ra) - - rb := make(json.RawMessage, len(b)) - copy(rb, b) - lb := newLazyNode(&rb) - - return la.equal(lb) -} - -// DecodePatch decodes the passed JSON document as an RFC 6902 patch. 
-func DecodePatch(buf []byte) (Patch, error) { - var p Patch - - err := json.Unmarshal(buf, &p) - - if err != nil { - return nil, err - } - - return p, nil -} - -// Apply mutates a JSON document according to the patch, and returns the new -// document. -func (p Patch) Apply(doc []byte) ([]byte, error) { - return p.ApplyIndent(doc, "") -} - -// ApplyIndent mutates a JSON document according to the patch, and returns the new -// document indented. -func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { - if len(doc) == 0 { - return doc, nil - } - - var pd container - if doc[0] == '[' { - pd = &partialArray{} - } else { - pd = &partialDoc{} - } - - err := json.Unmarshal(doc, pd) - - if err != nil { - return nil, err - } - - err = nil - - var accumulatedCopySize int64 - - for _, op := range p { - switch op.Kind() { - case "add": - err = p.add(&pd, op) - case "remove": - err = p.remove(&pd, op) - case "replace": - err = p.replace(&pd, op) - case "move": - err = p.move(&pd, op) - case "test": - err = p.test(&pd, op) - case "copy": - err = p.copy(&pd, op, &accumulatedCopySize) - default: - err = fmt.Errorf("Unexpected kind: %s", op.Kind()) - } - - if err != nil { - return nil, err - } - } - - if indent != "" { - return json.MarshalIndent(pd, "", indent) - } - - return json.Marshal(pd) -} - -// From http://tools.ietf.org/html/rfc6901#section-4 : -// -// Evaluation of each reference token begins by decoding any escaped -// character sequence. This is performed by first transforming any -// occurrence of the sequence '~1' to '/', and then transforming any -// occurrence of the sequence '~0' to '~'. - -var ( - rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") -) - -func decodePatchKey(k string) string { - return rfc6901Decoder.Replace(k) -} diff --git a/ui/wasm/vendor/github.com/fatih/color/.travis.yml b/ui/wasm/vendor/github.com/fatih/color/.travis.yml deleted file mode 100644 index 95f8a1ff5c7..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.8.x - - tip - diff --git a/ui/wasm/vendor/github.com/fatih/color/Gopkg.lock b/ui/wasm/vendor/github.com/fatih/color/Gopkg.lock deleted file mode 100644 index 7d879e9caf0..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/Gopkg.lock +++ /dev/null @@ -1,27 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/mattn/go-colorable" - packages = ["."] - revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" - version = "v0.0.9" - -[[projects]] - name = "github.com/mattn/go-isatty" - packages = ["."] - revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" - version = "v0.0.3" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["unix"] - revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/ui/wasm/vendor/github.com/fatih/color/Gopkg.toml b/ui/wasm/vendor/github.com/fatih/color/Gopkg.toml deleted file mode 100644 index ff1617f71da..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/Gopkg.toml +++ /dev/null @@ -1,30 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - name = "github.com/mattn/go-colorable" - version = "0.0.9" - -[[constraint]] - name = "github.com/mattn/go-isatty" - version = "0.0.3" diff --git a/ui/wasm/vendor/github.com/fatih/color/LICENSE.md b/ui/wasm/vendor/github.com/fatih/color/LICENSE.md deleted file mode 100644 index 25fdaf639df..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/LICENSE.md +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/ui/wasm/vendor/github.com/fatih/color/README.md b/ui/wasm/vendor/github.com/fatih/color/README.md deleted file mode 100644 index 3fc95446028..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/README.md +++ /dev/null @@ -1,179 +0,0 @@ -# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) - - - -Color lets you use colorized outputs in terms of [ANSI Escape -Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It -has support for Windows too! The API can be used in several ways, pick one that -suits you. - - -![Color](https://i.imgur.com/c1JI0lA.png) - - -## Install - -```bash -go get github.com/fatih/color -``` - -Note that the `vendor` folder is here for stability. Remove the folder if you -already have the dependencies in your GOPATH. 
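The README above pitches the package in terms of ANSI escape codes. For concreteness, this is the raw SGR convention that the deleted color.go reproduces in its format() and unformat() helpers, shown with no library at all; the sample text is arbitrary.

```go
package main

import "fmt"

func main() {
	const escape = "\x1b"
	// SGR parameters: 1 = bold, 36 = cyan foreground, 0 = reset, matching
	// the Bold, FgCyan, and Reset attribute constants in the deleted code.
	// color.New(color.FgCyan, color.Bold) ultimately emits this same
	// "\x1b[1;36m ... \x1b[0m" wrapping on terminals that support it.
	fmt.Printf("%s[1;36m%s%s[0m\n", escape, "bold cyan text", escape)
}
```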
- -## Examples - -### Standard colors - -```go -// Print with default helper functions -color.Cyan("Prints text in cyan.") - -// A newline will be appended automatically -color.Blue("Prints %s in blue.", "text") - -// These are using the default foreground colors -color.Red("We have red") -color.Magenta("And many others ..") - -``` - -### Mix and reuse colors - -```go -// Create a new color object -c := color.New(color.FgCyan).Add(color.Underline) -c.Println("Prints cyan text with an underline.") - -// Or just add them to New() -d := color.New(color.FgCyan, color.Bold) -d.Printf("This prints bold cyan %s\n", "too!.") - -// Mix up foreground and background colors, create new mixes! -red := color.New(color.FgRed) - -boldRed := red.Add(color.Bold) -boldRed.Println("This will print text in bold red.") - -whiteBackground := red.Add(color.BgWhite) -whiteBackground.Println("Red text with white background.") -``` - -### Use your own output (io.Writer) - -```go -// Use your own io.Writer output -color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - -blue := color.New(color.FgBlue) -blue.Fprint(writer, "This will print text in blue.") -``` - -### Custom print functions (PrintFunc) - -```go -// Create a custom print function for convenience -red := color.New(color.FgRed).PrintfFunc() -red("Warning") -red("Error: %s", err) - -// Mix up multiple attributes -notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() -notice("Don't forget this...") -``` - -### Custom fprint functions (FprintFunc) - -```go -blue := color.New(FgBlue).FprintfFunc() -blue(myWriter, "important notice: %s", stars) - -// Mix up with multiple attributes -success := color.New(color.Bold, color.FgGreen).FprintlnFunc() -success(myWriter, "Don't forget this...") -``` - -### Insert into noncolor strings (SprintFunc) - -```go -// Create SprintXxx functions to mix strings with other non-colorized strings: -yellow := color.New(color.FgYellow).SprintFunc() -red := color.New(color.FgRed).SprintFunc() -fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) - -info := color.New(color.FgWhite, color.BgGreen).SprintFunc() -fmt.Printf("This %s rocks!\n", info("package")) - -// Use helper functions -fmt.Println("This", color.RedString("warning"), "should be not neglected.") -fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") - -// Windows supported too! Just don't forget to change the output to color.Output -fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) -``` - -### Plug into existing code - -```go -// Use handy standard colors -color.Set(color.FgYellow) - -fmt.Println("Existing text will now be in yellow") -fmt.Printf("This one %s\n", "too") - -color.Unset() // Don't forget to unset - -// You can mix up parameters -color.Set(color.FgMagenta, color.Bold) -defer color.Unset() // Use it in your function - -fmt.Println("All text will now be bold magenta.") -``` - -### Disable/Enable color - -There might be a case where you want to explicitly disable/enable color output. the -`go-isatty` package will automatically disable color output for non-tty output streams -(for example if the output were piped directly to `less`) - -`Color` has support to disable/enable colors both globally and for single color -definitions. For example suppose you have a CLI app and a `--no-color` bool flag. 
You -can easily disable the color output with: - -```go - -var flagNoColor = flag.Bool("no-color", false, "Disable color output") - -if *flagNoColor { - color.NoColor = true // disables colorized output -} -``` - -It also has support for single color definitions (local). You can -disable/enable color output on the fly: - -```go -c := color.New(color.FgCyan) -c.Println("Prints cyan text") - -c.DisableColor() -c.Println("This is printed without any color") - -c.EnableColor() -c.Println("This prints again cyan...") -``` - -## Todo - -* Save/Return previous values -* Evaluate fmt.Formatter interface - - -## Credits - - * [Fatih Arslan](https://github.com/fatih) - * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) - -## License - -The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details - diff --git a/ui/wasm/vendor/github.com/fatih/color/color.go b/ui/wasm/vendor/github.com/fatih/color/color.go deleted file mode 100644 index 91c8e9f0620..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/color.go +++ /dev/null @@ -1,603 +0,0 @@ -package color - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // NoColor defines if the output is colorized or not. It's dynamically set to - // false or true based on the stdout's file descriptor referring to a terminal - // or not. This is a global option and affects all colors. For more control - // over each color block use the methods DisableColor() individually. - NoColor = os.Getenv("TERM") == "dumb" || - (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) - - // Output defines the standard output of the print functions. By default - // os.Stdout is used. - Output = colorable.NewColorableStdout() - - // Error defines a color supporting writer for os.Stderr. - Error = colorable.NewColorableStderr() - - // colorsCache is used to reduce the count of created Color objects and - // allows to reuse already created objects with required Attribute. - colorsCache = make(map[Attribute]*Color) - colorsCacheMu sync.Mutex // protects colorsCache -) - -// Color defines a custom color object which is defined by SGR parameters. -type Color struct { - params []Attribute - noColor *bool -} - -// Attribute defines a single SGR Code -type Attribute int - -const escape = "\x1b" - -// Base attributes -const ( - Reset Attribute = iota - Bold - Faint - Italic - Underline - BlinkSlow - BlinkRapid - ReverseVideo - Concealed - CrossedOut -) - -// Foreground text colors -const ( - FgBlack Attribute = iota + 30 - FgRed - FgGreen - FgYellow - FgBlue - FgMagenta - FgCyan - FgWhite -) - -// Foreground Hi-Intensity text colors -const ( - FgHiBlack Attribute = iota + 90 - FgHiRed - FgHiGreen - FgHiYellow - FgHiBlue - FgHiMagenta - FgHiCyan - FgHiWhite -) - -// Background text colors -const ( - BgBlack Attribute = iota + 40 - BgRed - BgGreen - BgYellow - BgBlue - BgMagenta - BgCyan - BgWhite -) - -// Background Hi-Intensity text colors -const ( - BgHiBlack Attribute = iota + 100 - BgHiRed - BgHiGreen - BgHiYellow - BgHiBlue - BgHiMagenta - BgHiCyan - BgHiWhite -) - -// New returns a newly created color object. -func New(value ...Attribute) *Color { - c := &Color{params: make([]Attribute, 0)} - c.Add(value...) - return c -} - -// Set sets the given parameters immediately. It will change the color of -// output with the given SGR parameters until color.Unset() is called. 
-func Set(p ...Attribute) *Color { - c := New(p...) - c.Set() - return c -} - -// Unset resets all escape attributes and clears the output. Usually should -// be called after Set(). -func Unset() { - if NoColor { - return - } - - fmt.Fprintf(Output, "%s[%dm", escape, Reset) -} - -// Set sets the SGR sequence. -func (c *Color) Set() *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(Output, c.format()) - return c -} - -func (c *Color) unset() { - if c.isNoColorSet() { - return - } - - Unset() -} - -func (c *Color) setWriter(w io.Writer) *Color { - if c.isNoColorSet() { - return c - } - - fmt.Fprintf(w, c.format()) - return c -} - -func (c *Color) unsetWriter(w io.Writer) { - if c.isNoColorSet() { - return - } - - if NoColor { - return - } - - fmt.Fprintf(w, "%s[%dm", escape, Reset) -} - -// Add is used to chain SGR parameters. Use as many as parameters to combine -// and create custom color objects. Example: Add(color.FgRed, color.Underline). -func (c *Color) Add(value ...Attribute) *Color { - c.params = append(c.params, value...) - return c -} - -func (c *Color) prepend(value Attribute) { - c.params = append(c.params, 0) - copy(c.params[1:], c.params[0:]) - c.params[0] = value -} - -// Fprint formats using the default formats for its operands and writes to w. -// Spaces are added between operands when neither is a string. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprint(w, a...) -} - -// Print formats using the default formats for its operands and writes to -// standard output. Spaces are added between operands when neither is a -// string. It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Print(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprint(Output, a...) -} - -// Fprintf formats according to a format specifier and writes to w. -// It returns the number of bytes written and any write error encountered. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintf(w, format, a...) -} - -// Printf formats according to a format specifier and writes to standard output. -// It returns the number of bytes written and any write error encountered. -// This is the standard fmt.Printf() method wrapped with the given color. -func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintf(Output, format, a...) -} - -// Fprintln formats using the default formats for its operands and writes to w. -// Spaces are always added between operands and a newline is appended. -// On Windows, users should wrap w with colorable.NewColorable() if w is of -// type *os.File. -func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - c.setWriter(w) - defer c.unsetWriter(w) - - return fmt.Fprintln(w, a...) -} - -// Println formats using the default formats for its operands and writes to -// standard output. Spaces are always added between operands and a newline is -// appended. 
It returns the number of bytes written and any write error -// encountered. This is the standard fmt.Print() method wrapped with the given -// color. -func (c *Color) Println(a ...interface{}) (n int, err error) { - c.Set() - defer c.unset() - - return fmt.Fprintln(Output, a...) -} - -// Sprint is just like Print, but returns a string instead of printing it. -func (c *Color) Sprint(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) -} - -// Sprintln is just like Println, but returns a string instead of printing it. -func (c *Color) Sprintln(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) -} - -// Sprintf is just like Printf, but returns a string instead of printing it. -func (c *Color) Sprintf(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) -} - -// FprintFunc returns a new function that prints the passed arguments as -// colorized with color.Fprint(). -func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprint(w, a...) - } -} - -// PrintFunc returns a new function that prints the passed arguments as -// colorized with color.Print(). -func (c *Color) PrintFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Print(a...) - } -} - -// FprintfFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintf(). -func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { - return func(w io.Writer, format string, a ...interface{}) { - c.Fprintf(w, format, a...) - } -} - -// PrintfFunc returns a new function that prints the passed arguments as -// colorized with color.Printf(). -func (c *Color) PrintfFunc() func(format string, a ...interface{}) { - return func(format string, a ...interface{}) { - c.Printf(format, a...) - } -} - -// FprintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Fprintln(). -func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { - return func(w io.Writer, a ...interface{}) { - c.Fprintln(w, a...) - } -} - -// PrintlnFunc returns a new function that prints the passed arguments as -// colorized with color.Println(). -func (c *Color) PrintlnFunc() func(a ...interface{}) { - return func(a ...interface{}) { - c.Println(a...) - } -} - -// SprintFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprint(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output, example: -// -// put := New(FgYellow).SprintFunc() -// fmt.Fprintf(color.Output, "This is a %s", put("warning")) -func (c *Color) SprintFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprint(a...)) - } -} - -// SprintfFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintf(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. -func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { - return func(format string, a ...interface{}) string { - return c.wrap(fmt.Sprintf(format, a...)) - } -} - -// SprintlnFunc returns a new function that returns colorized strings for the -// given arguments with fmt.Sprintln(). Useful to put into or mix into other -// string. Windows users should use this in conjunction with color.Output. 
-func (c *Color) SprintlnFunc() func(a ...interface{}) string { - return func(a ...interface{}) string { - return c.wrap(fmt.Sprintln(a...)) - } -} - -// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" -// an example output might be: "1;36" -> bold cyan -func (c *Color) sequence() string { - format := make([]string, len(c.params)) - for i, v := range c.params { - format[i] = strconv.Itoa(int(v)) - } - - return strings.Join(format, ";") -} - -// wrap wraps the s string with the colors attributes. The string is ready to -// be printed. -func (c *Color) wrap(s string) string { - if c.isNoColorSet() { - return s - } - - return c.format() + s + c.unformat() -} - -func (c *Color) format() string { - return fmt.Sprintf("%s[%sm", escape, c.sequence()) -} - -func (c *Color) unformat() string { - return fmt.Sprintf("%s[%dm", escape, Reset) -} - -// DisableColor disables the color output. Useful to not change any existing -// code and still being able to output. Can be used for flags like -// "--no-color". To enable back use EnableColor() method. -func (c *Color) DisableColor() { - c.noColor = boolPtr(true) -} - -// EnableColor enables the color output. Use it in conjunction with -// DisableColor(). Otherwise this method has no side effects. -func (c *Color) EnableColor() { - c.noColor = boolPtr(false) -} - -func (c *Color) isNoColorSet() bool { - // check first if we have user setted action - if c.noColor != nil { - return *c.noColor - } - - // if not return the global option, which is disabled by default - return NoColor -} - -// Equals returns a boolean value indicating whether two colors are equal. -func (c *Color) Equals(c2 *Color) bool { - if len(c.params) != len(c2.params) { - return false - } - - for _, attr := range c.params { - if !c2.attrExists(attr) { - return false - } - } - - return true -} - -func (c *Color) attrExists(a Attribute) bool { - for _, attr := range c.params { - if attr == a { - return true - } - } - - return false -} - -func boolPtr(v bool) *bool { - return &v -} - -func getCachedColor(p Attribute) *Color { - colorsCacheMu.Lock() - defer colorsCacheMu.Unlock() - - c, ok := colorsCache[p] - if !ok { - c = New(p) - colorsCache[p] = c - } - - return c -} - -func colorPrint(format string, p Attribute, a ...interface{}) { - c := getCachedColor(p) - - if !strings.HasSuffix(format, "\n") { - format += "\n" - } - - if len(a) == 0 { - c.Print(format) - } else { - c.Printf(format, a...) - } -} - -func colorString(format string, p Attribute, a ...interface{}) string { - c := getCachedColor(p) - - if len(a) == 0 { - return c.SprintFunc()(format) - } - - return c.SprintfFunc()(format, a...) -} - -// Black is a convenient helper function to print with black foreground. A -// newline is appended to format by default. -func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } - -// Red is a convenient helper function to print with red foreground. A -// newline is appended to format by default. -func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } - -// Green is a convenient helper function to print with green foreground. A -// newline is appended to format by default. -func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } - -// Yellow is a convenient helper function to print with yellow foreground. -// A newline is appended to format by default. -func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) 
} - -// Blue is a convenient helper function to print with blue foreground. A -// newline is appended to format by default. -func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } - -// Magenta is a convenient helper function to print with magenta foreground. -// A newline is appended to format by default. -func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } - -// Cyan is a convenient helper function to print with cyan foreground. A -// newline is appended to format by default. -func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } - -// White is a convenient helper function to print with white foreground. A -// newline is appended to format by default. -func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } - -// BlackString is a convenient helper function to return a string with black -// foreground. -func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } - -// RedString is a convenient helper function to return a string with red -// foreground. -func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } - -// GreenString is a convenient helper function to return a string with green -// foreground. -func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } - -// YellowString is a convenient helper function to return a string with yellow -// foreground. -func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } - -// BlueString is a convenient helper function to return a string with blue -// foreground. -func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } - -// MagentaString is a convenient helper function to return a string with magenta -// foreground. -func MagentaString(format string, a ...interface{}) string { - return colorString(format, FgMagenta, a...) -} - -// CyanString is a convenient helper function to return a string with cyan -// foreground. -func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } - -// WhiteString is a convenient helper function to return a string with white -// foreground. -func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } - -// HiBlack is a convenient helper function to print with hi-intensity black foreground. A -// newline is appended to format by default. -func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } - -// HiRed is a convenient helper function to print with hi-intensity red foreground. A -// newline is appended to format by default. -func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } - -// HiGreen is a convenient helper function to print with hi-intensity green foreground. A -// newline is appended to format by default. -func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } - -// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. -// A newline is appended to format by default. -func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } - -// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A -// newline is appended to format by default. -func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) 
} - -// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. -// A newline is appended to format by default. -func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } - -// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A -// newline is appended to format by default. -func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } - -// HiWhite is a convenient helper function to print with hi-intensity white foreground. A -// newline is appended to format by default. -func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } - -// HiBlackString is a convenient helper function to return a string with hi-intensity black -// foreground. -func HiBlackString(format string, a ...interface{}) string { - return colorString(format, FgHiBlack, a...) -} - -// HiRedString is a convenient helper function to return a string with hi-intensity red -// foreground. -func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } - -// HiGreenString is a convenient helper function to return a string with hi-intensity green -// foreground. -func HiGreenString(format string, a ...interface{}) string { - return colorString(format, FgHiGreen, a...) -} - -// HiYellowString is a convenient helper function to return a string with hi-intensity yellow -// foreground. -func HiYellowString(format string, a ...interface{}) string { - return colorString(format, FgHiYellow, a...) -} - -// HiBlueString is a convenient helper function to return a string with hi-intensity blue -// foreground. -func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } - -// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta -// foreground. -func HiMagentaString(format string, a ...interface{}) string { - return colorString(format, FgHiMagenta, a...) -} - -// HiCyanString is a convenient helper function to return a string with hi-intensity cyan -// foreground. -func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } - -// HiWhiteString is a convenient helper function to return a string with hi-intensity white -// foreground. -func HiWhiteString(format string, a ...interface{}) string { - return colorString(format, FgHiWhite, a...) -} diff --git a/ui/wasm/vendor/github.com/fatih/color/doc.go b/ui/wasm/vendor/github.com/fatih/color/doc.go deleted file mode 100644 index cf1e96500f4..00000000000 --- a/ui/wasm/vendor/github.com/fatih/color/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package color is an ANSI color package to output colorized or SGR defined -output to the standard output. The API can be used in several way, pick one -that suits you. - -Use simple and default helper functions with predefined foreground colors: - - color.Cyan("Prints text in cyan.") - - // a newline will be appended automatically - color.Blue("Prints %s in blue.", "text") - - // More default foreground colors.. - color.Red("We have red") - color.Yellow("Yellow color too!") - color.Magenta("And many others ..") - - // Hi-intensity colors - color.HiGreen("Bright green color.") - color.HiBlack("Bright black means gray..") - color.HiWhite("Shiny white color!") - -However there are times where custom color mixes are required. Below are some -examples to create custom color objects and use the print functions of each -separate color object. 
- - // Create a new color object - c := color.New(color.FgCyan).Add(color.Underline) - c.Println("Prints cyan text with an underline.") - - // Or just add them to New() - d := color.New(color.FgCyan, color.Bold) - d.Printf("This prints bold cyan %s\n", "too!.") - - - // Mix up foreground and background colors, create new mixes! - red := color.New(color.FgRed) - - boldRed := red.Add(color.Bold) - boldRed.Println("This will print text in bold red.") - - whiteBackground := red.Add(color.BgWhite) - whiteBackground.Println("Red text with White background.") - - // Use your own io.Writer output - color.New(color.FgBlue).Fprintln(myWriter, "blue color!") - - blue := color.New(color.FgBlue) - blue.Fprint(myWriter, "This will print text in blue.") - -You can create PrintXxx functions to simplify even more: - - // Create a custom print function for convenient - red := color.New(color.FgRed).PrintfFunc() - red("warning") - red("error: %s", err) - - // Mix up multiple attributes - notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() - notice("don't forget this...") - -You can also FprintXxx functions to pass your own io.Writer: - - blue := color.New(FgBlue).FprintfFunc() - blue(myWriter, "important notice: %s", stars) - - // Mix up with multiple attributes - success := color.New(color.Bold, color.FgGreen).FprintlnFunc() - success(myWriter, don't forget this...") - - -Or create SprintXxx functions to mix strings with other non-colorized strings: - - yellow := New(FgYellow).SprintFunc() - red := New(FgRed).SprintFunc() - - fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Printf("this %s rocks!\n", info("package")) - -Windows support is enabled by default. All Print functions work as intended. -However only for color.SprintXXX functions, user should use fmt.FprintXXX and -set the output to color.Output: - - fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) - - info := New(FgWhite, BgGreen).SprintFunc() - fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) - -Using with existing code is possible. Just use the Set() method to set the -standard output to the given parameters. That way a rewrite of an existing -code is not required. - - // Use handy standard colors. - color.Set(color.FgYellow) - - fmt.Println("Existing text will be now in Yellow") - fmt.Printf("This one %s\n", "too") - - color.Unset() // don't forget to unset - - // You can mix up parameters - color.Set(color.FgMagenta, color.Bold) - defer color.Unset() // use it in your function - - fmt.Println("All text will be now bold magenta.") - -There might be a case where you want to disable color output (for example to -pipe the standard output of your app to somewhere else). `Color` has support to -disable colors both globally and for single color definition. For example -suppose you have a CLI app and a `--no-color` bool flag. You can easily disable -the color output with: - - var flagNoColor = flag.Bool("no-color", false, "Disable color output") - - if *flagNoColor { - color.NoColor = true // disables colorized output - } - -It also has support for single color definitions (local). 
You can -disable/enable color output on the fly: - - c := color.New(color.FgCyan) - c.Println("Prints cyan text") - - c.DisableColor() - c.Println("This is printed without any color") - - c.EnableColor() - c.Println("This prints again cyan...") -*/ -package color diff --git a/ui/wasm/vendor/github.com/feloy/devfile-lifecycle/pkg/graph/graph.go b/ui/wasm/vendor/github.com/feloy/devfile-lifecycle/pkg/graph/graph.go deleted file mode 100644 index 08ef758f0e6..00000000000 --- a/ui/wasm/vendor/github.com/feloy/devfile-lifecycle/pkg/graph/graph.go +++ /dev/null @@ -1,75 +0,0 @@ -package graph - -import ( - "sort" - - "github.com/Heiko-san/mermaidgen/flowchart" -) - -type Graph struct { - EntryNodeID string - nodes map[string]*Node - edges []*Edge -} - -func NewGraph() *Graph { - return &Graph{ - nodes: make(map[string]*Node), - } -} - -func (o *Graph) AddNode(id string, text ...string) *Node { - node := Node{ - ID: id, - Text: text, - } - o.nodes[id] = &node - return &node -} - -func (o *Graph) AddEdge(from *Node, to *Node, text ...string) *Edge { - edge := Edge{ - From: from, - To: to, - Text: text, - } - o.edges = append(o.edges, &edge) - return &edge -} - -func (o *Graph) ToFlowchart() *flowchart.Flowchart { - f := flowchart.NewFlowchart() - n := f.AddNode(o.EntryNodeID) - n.Text = o.nodes[o.EntryNodeID].Text - keys := make([]string, 0, len(o.nodes)) - for k := range o.nodes { - keys = append(keys, k) - - } - sort.Strings(keys) - for _, key := range keys { - node := o.nodes[key] - if node.ID == o.EntryNodeID { - continue - } - n := f.AddNode(node.ID) - n.Text = node.Text - } - - for _, edge := range o.edges { - e := f.AddEdge(f.GetNode(edge.From.ID), f.GetNode(edge.To.ID)) - e.Text = edge.Text - } - return f -} - -type Node struct { - ID string - Text []string -} - -type Edge struct { - From *Node - To *Node - Text []string -} diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.editorconfig b/ui/wasm/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e5..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitattributes b/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0a..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitignore b/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitignore deleted file mode 100644 index 4cd0cbaf432..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -# Setup a Global .gitignore for OS and editor generated files: -# https://help.github.com/articles/ignoring-files -# git config --global core.excludesfile ~/.gitignore_global - -.vagrant -*.sublime-project diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.travis.yml b/ui/wasm/vendor/github.com/fsnotify/fsnotify/.travis.yml deleted file mode 100644 index a9c30165cdd..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -sudo: false -language: go - -go: - - "stable" - - "1.11.x" - - "1.10.x" - - "1.9.x" - -matrix: - include: - - go: "stable" - env: GOLINT=true - 
allow_failures: - - go: tip - fast_finish: true - - -before_install: - - if [ ! -z "${GOLINT}" ]; then go get -u golang.org/x/lint/golint; fi - -script: - - go test --race ./... - -after_script: - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - if [ ! -z "${GOLINT}" ]; then echo running golint; golint --set_exit_status ./...; else echo skipping golint; fi - - go vet ./... - -os: - - linux - - osx - - windows - -notifications: - email: false diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/AUTHORS b/ui/wasm/vendor/github.com/fsnotify/fsnotify/AUTHORS deleted file mode 100644 index 5ab5d41c547..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/AUTHORS +++ /dev/null @@ -1,52 +0,0 @@ -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# You can update this list using the following command: -# -# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' - -# Please keep the list sorted. - -Aaron L -Adrien Bustany -Amit Krishnan -Anmol Sethi -Bjørn Erik Pedersen -Bruno Bigras -Caleb Spare -Case Nelson -Chris Howey -Christoffer Buchholz -Daniel Wagner-Hall -Dave Cheney -Evan Phoenix -Francisco Souza -Hari haran -John C Barstow -Kelvin Fo -Ken-ichirou MATSUZAWA -Matt Layher -Nathan Youngman -Nickolai Zeldovich -Patrick -Paul Hammond -Pawel Knap -Pieter Droogendijk -Pursuit92 -Riku Voipio -Rob Figueiredo -Rodrigo Chiossi -Slawek Ligus -Soge Zhang -Tiffany Jernigan -Tilak Sharma -Tom Payne -Travis Cline -Tudor Golubenco -Vahe Khachikyan -Yukang -bronze1man -debrando -henrikedwards -铁哥 diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/ui/wasm/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md deleted file mode 100644 index be4d7ea2c14..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ /dev/null @@ -1,317 +0,0 @@ -# Changelog - -## v1.4.7 / 2018-01-09 - -* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine) -* Tests: Fix missing verb on format string (thanks @rchiossi) -* Linux: Fix deadlock in Remove (thanks @aarondl) -* Linux: Watch.Add improvements (avoid race, fix consistency, reduce garbage) (thanks @twpayne) -* Docs: Moved FAQ into the README (thanks @vahe) -* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich) -* Docs: replace references to OS X with macOS - -## v1.4.2 / 2016-10-10 - -* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack) - -## v1.4.1 / 2016-10-04 - -* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack) - -## v1.4.0 / 2016-10-01 - -* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie) - -## v1.3.1 / 2016-06-28 - -* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc) - -## v1.3.0 / 2016-04-19 - -* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135) - -## v1.2.10 / 2016-03-02 - -* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj) - -## v1.2.9 / 2016-01-13 - -kqueue: Fix logic for CREATE after REMOVE 
[#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep) - -## v1.2.8 / 2015-12-17 - -* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test) -* inotify: fix race in test -* enable race detection for continuous integration (Linux, Mac, Windows) - -## v1.2.5 / 2015-10-17 - -* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki) -* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken) -* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie) -* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion) - -## v1.2.1 / 2015-10-14 - -* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx) - -## v1.2.0 / 2015-02-08 - -* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD) -* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD) -* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59) - -## v1.1.1 / 2015-02-05 - -* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD) - -## v1.1.0 / 2014-12-12 - -* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43) - * add low-level functions - * only need to store flags on directories - * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13) - * done can be an unbuffered channel - * remove calls to os.NewSyscallError -* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher) -* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48) -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v1.0.4 / 2014-09-07 - -* kqueue: add dragonfly to the build tags. -* Rename source code files, rearrange code so exported APIs are at the top. -* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang) - -## v1.0.3 / 2014-08-19 - -* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36) - -## v1.0.2 / 2014-08-17 - -* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) -* [Fix] Make ./path and path equivalent. (thanks @zhsso) - -## v1.0.0 / 2014-08-15 - -* [API] Remove AddWatch on Windows, use Add. -* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30) -* Minor updates based on feedback from golint. - -## dev / 2014-07-09 - -* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify). -* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno) - -## dev / 2014-07-04 - -* kqueue: fix incorrect mutex used in Close() -* Update example to demonstrate usage of Op. 
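Looping back to the devfile-lifecycle graph package deleted further above (ui/wasm/vendor/github.com/feloy/devfile-lifecycle/pkg/graph): its builder API is small, and a minimal sketch follows. The import path comes from the deleted vendor directory; the node IDs and labels are hypothetical, and treating the resulting flowchart as a fmt.Stringer that emits mermaid source is an assumption about the Heiko-san/mermaidgen dependency.

```go
package main

import (
	"fmt"

	"github.com/feloy/devfile-lifecycle/pkg/graph"
)

func main() {
	g := graph.NewGraph()

	// Hypothetical lifecycle nodes; AddNode takes an ID plus label lines.
	start := g.AddNode("start", "container", "runtime")
	build := g.AddNode("build", "exec", "build-command")
	g.EntryNodeID = "start" // ToFlowchart assumes an entry node is set

	g.AddEdge(start, build, "dev session starts")

	// ToFlowchart converts the graph to a mermaidgen flowchart; printing it
	// is assumed to emit mermaid source via the flowchart's String method.
	fc := g.ToFlowchart()
	fmt.Println(fc)
}
```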
- -## dev / 2014-06-28 - -* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4) -* Fix for String() method on Event (thanks Alex Brainman) -* Don't build on Plan 9 or Solaris (thanks @4ad) - -## dev / 2014-06-21 - -* Events channel of type Event rather than *Event. -* [internal] use syscall constants directly for inotify and kqueue. -* [internal] kqueue: rename events to kevents and fileEvent to event. - -## dev / 2014-06-19 - -* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally). -* [internal] remove cookie from Event struct (unused). -* [internal] Event struct has the same definition across every OS. -* [internal] remove internal watch and removeWatch methods. - -## dev / 2014-06-12 - -* [API] Renamed Watch() to Add() and RemoveWatch() to Remove(). -* [API] Pluralized channel names: Events and Errors. -* [API] Renamed FileEvent struct to Event. -* [API] Op constants replace methods like IsCreate(). - -## dev / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## dev / 2014-05-23 - -* [API] Remove current implementation of WatchFlags. - * current implementation doesn't take advantage of OS for efficiency - * provides little benefit over filtering events as they are received, but has extra bookkeeping and mutexes - * no tests for the current implementation - * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195) - -## v0.9.3 / 2014-12-31 - -* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51) - -## v0.9.2 / 2014-08-17 - -* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso) - -## v0.9.1 / 2014-06-12 - -* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98) - -## v0.9.0 / 2014-01-17 - -* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany) -* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare) -* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library. 
- -## v0.8.12 / 2013-11-13 - -* [API] Remove FD_SET and friends from Linux adapter - -## v0.8.11 / 2013-11-02 - -* [Doc] Add Changelog [#72][] (thanks @nathany) -* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond) - -## v0.8.10 / 2013-10-19 - -* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott) -* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer) -* [Doc] specify OS-specific limits in README (thanks @debrando) - -## v0.8.9 / 2013-09-08 - -* [Doc] Contributing (thanks @nathany) -* [Doc] update package path in example code [#63][] (thanks @paulhammond) -* [Doc] GoCI badge in README (Linux only) [#60][] -* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany) - -## v0.8.8 / 2013-06-17 - -* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie) - -## v0.8.7 / 2013-06-03 - -* [API] Make syscall flags internal -* [Fix] inotify: ignore event changes -* [Fix] race in symlink test [#45][] (reported by @srid) -* [Fix] tests on Windows -* lower case error messages - -## v0.8.6 / 2013-05-23 - -* kqueue: Use EVT_ONLY flag on Darwin -* [Doc] Update README with full example - -## v0.8.5 / 2013-05-09 - -* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg) - -## v0.8.4 / 2013-04-07 - -* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz) - -## v0.8.3 / 2013-03-13 - -* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin) -* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin) - -## v0.8.2 / 2013-02-07 - -* [Doc] add Authors -* [Fix] fix data races for map access [#29][] (thanks @fsouza) - -## v0.8.1 / 2013-01-09 - -* [Fix] Windows path separators -* [Doc] BSD License - -## v0.8.0 / 2012-11-09 - -* kqueue: directory watching improvements (thanks @vmirage) -* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto) -* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr) - -## v0.7.4 / 2012-10-09 - -* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji) -* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig) -* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig) -* [Fix] kqueue: modify after recreation of file - -## v0.7.3 / 2012-09-27 - -* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage) -* [Fix] kqueue: no longer get duplicate CREATE events - -## v0.7.2 / 2012-09-01 - -* kqueue: events for created directories - -## v0.7.1 / 2012-07-14 - -* [Fix] for renaming files - -## v0.7.0 / 2012-07-02 - -* [Feature] FSNotify flags -* [Fix] inotify: Added file name back to event path - -## v0.6.0 / 2012-06-06 - -* kqueue: watch files after directory created (thanks @tmc) - -## v0.5.1 / 2012-05-22 - -* [Fix] inotify: remove all watches before Close() - -## v0.5.0 / 2012-05-03 - -* [API] kqueue: return errors during watch instead of sending over channel -* kqueue: match symlink behavior on Linux -* inotify: add `DELETE_SELF` (requested by @taralx) -* [Fix] kqueue: handle EINTR (reported by @robfig) -* [Doc] Godoc example [#1][] (thanks @davecheney) - -## v0.4.0 / 2012-03-30 - -* Go 1 released: build with go tool -* [Feature] Windows support using winfsnotify -* Windows does not have attribute change notifications -* Roll attribute notifications into IsModify - -## v0.3.0 / 2012-02-19 - -* kqueue: add files when watch directory - -## v0.2.0 / 
2011-12-30 - -* update to latest Go weekly code - -## v0.1.0 / 2011-10-19 - -* kqueue: add watch on file creation to match inotify -* kqueue: create file event -* inotify: ignore `IN_IGNORED` events -* event String() -* linux: common FileEvent functions -* initial commit - -[#79]: https://github.com/howeyc/fsnotify/pull/79 -[#77]: https://github.com/howeyc/fsnotify/pull/77 -[#72]: https://github.com/howeyc/fsnotify/issues/72 -[#71]: https://github.com/howeyc/fsnotify/issues/71 -[#70]: https://github.com/howeyc/fsnotify/issues/70 -[#63]: https://github.com/howeyc/fsnotify/issues/63 -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#60]: https://github.com/howeyc/fsnotify/issues/60 -[#59]: https://github.com/howeyc/fsnotify/issues/59 -[#49]: https://github.com/howeyc/fsnotify/issues/49 -[#45]: https://github.com/howeyc/fsnotify/issues/45 -[#40]: https://github.com/howeyc/fsnotify/issues/40 -[#36]: https://github.com/howeyc/fsnotify/issues/36 -[#33]: https://github.com/howeyc/fsnotify/issues/33 -[#29]: https://github.com/howeyc/fsnotify/issues/29 -[#25]: https://github.com/howeyc/fsnotify/issues/25 -[#24]: https://github.com/howeyc/fsnotify/issues/24 -[#21]: https://github.com/howeyc/fsnotify/issues/21 diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/ui/wasm/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md deleted file mode 100644 index 828a60b24ba..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ /dev/null @@ -1,77 +0,0 @@ -# Contributing - -## Issues - -* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues). -* Please indicate the platform you are using fsnotify on. -* A code example to reproduce the problem is appreciated. - -## Pull Requests - -### Contributor License Agreement - -fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual). - -Please indicate that you have signed the CLA in your pull request. - -### How fsnotify is Developed - -* Development is done on feature branches. -* Tests are run on BSD, Linux, macOS and Windows. -* Pull requests are reviewed and [applied to master][am] using [hub][]. - * Maintainers may modify or squash commits rather than asking contributors to. -* To issue a new release, the maintainers will: - * Update the CHANGELOG - * Tag a version, which will become available through gopkg.in. - -### How to Fork - -For smooth sailing, always use the original import path. Installing with `go get` makes this easy. - -1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`) -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Ensure everything works and the tests pass (see below) -4. Commit your changes (`git commit -am 'Add some feature'`) - -Contribute upstream: - -1. Fork fsnotify on GitHub -2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`) -3. Push to the branch (`git push fork my-new-feature`) -4. 
Create a new Pull Request on GitHub - -This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/). - -### Testing - -fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows. - -Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on. - -To aid in cross-platform testing there is a Vagrantfile for Linux and BSD. - -* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) -* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder. -* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password) -* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`. -* When you're done, you will want to halt or destroy the Vagrant boxes. - -Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory. - -Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads). - -### Maintainers - -Help maintaining fsnotify is welcome. To be a maintainer: - -* Submit a pull request and sign the CLA as above. -* You must be able to run the test suite on Mac, Windows, Linux and BSD. - -To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][]. - -All code changes should be internal pull requests. - -Releases are tagged using [Semantic Versioning](http://semver.org/). - -[hub]: https://github.com/github/hub -[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/LICENSE b/ui/wasm/vendor/github.com/fsnotify/fsnotify/LICENSE deleted file mode 100644 index e180c8fb059..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/README.md b/ui/wasm/vendor/github.com/fsnotify/fsnotify/README.md deleted file mode 100644 index b2629e5229c..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/README.md +++ /dev/null @@ -1,130 +0,0 @@ -# File system notifications for Go - -[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify) - -fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running: - -```console -go get -u golang.org/x/sys/... -``` - -Cross platform: Windows, Linux, BSD and macOS. - -| Adapter | OS | Status | -| --------------------- | -------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | -| inotify | Linux 2.6.27 or later, Android\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| kqueue | BSD, macOS, iOS\* | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| ReadDirectoryChangesW | Windows | Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify) | -| FSEvents | macOS | [Planned](https://github.com/fsnotify/fsnotify/issues/11) | -| FEN | Solaris 11 | [In Progress](https://github.com/fsnotify/fsnotify/issues/12) | -| fanotify | Linux 2.6.37+ | [Planned](https://github.com/fsnotify/fsnotify/issues/114) | -| USN Journals | Windows | [Maybe](https://github.com/fsnotify/fsnotify/issues/53) | -| Polling | *All* | [Maybe](https://github.com/fsnotify/fsnotify/issues/9) | - -\* Android and iOS are untested. - -Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information. - -## API stability - -fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). - -All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number. - -Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`. 
- -## Usage - -```go -package main - -import ( - "log" - - "github.com/fsnotify/fsnotify" -) - -func main() { - watcher, err := fsnotify.NewWatcher() - if err != nil { - log.Fatal(err) - } - defer watcher.Close() - - done := make(chan bool) - go func() { - for { - select { - case event, ok := <-watcher.Events: - if !ok { - return - } - log.Println("event:", event) - if event.Op&fsnotify.Write == fsnotify.Write { - log.Println("modified file:", event.Name) - } - case err, ok := <-watcher.Errors: - if !ok { - return - } - log.Println("error:", err) - } - } - }() - - err = watcher.Add("/tmp/foo") - if err != nil { - log.Fatal(err) - } - <-done -} -``` - -## Contributing - -Please refer to [CONTRIBUTING][] before opening an issue or pull request. - -## Example - -See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go). - -## FAQ - -**When a file is moved to another directory is it still being watched?** - -No (it shouldn't be, unless you are watching where it was moved to). - -**When I watch a directory, are all subdirectories watched as well?** - -No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]). - -**Do I have to watch the Error and Event channels in a separate goroutine?** - -As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7]) - -**Why am I receiving multiple events for the same file on OS X?** - -Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]). - -**How many files can be watched at once?** - -There are OS-specific limits as to how many watches can be created: -* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error. -* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error. - -**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?** - -fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications. - -[#62]: https://github.com/howeyc/fsnotify/issues/62 -[#18]: https://github.com/fsnotify/fsnotify/issues/18 -[#11]: https://github.com/fsnotify/fsnotify/issues/11 -[#7]: https://github.com/howeyc/fsnotify/issues/7 - -[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md - -## Related Projects - -* [notify](https://github.com/rjeczalik/notify) -* [fsevents](https://github.com/fsnotify/fsevents) - diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/fen.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/fen.go deleted file mode 100644 index ced39cb881e..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/fen.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build solaris - -package fsnotify - -import ( - "errors" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
-func NewWatcher() (*Watcher, error) { - return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - return nil -} - -// Remove stops watching the the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - return nil -} diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/fsnotify.go deleted file mode 100644 index 89cab046d12..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !plan9 - -// Package fsnotify provides a platform-independent interface for file system notifications. -package fsnotify - -import ( - "bytes" - "errors" - "fmt" -) - -// Event represents a single file system notification. -type Event struct { - Name string // Relative path to the file or directory. - Op Op // File operation that triggered the event. -} - -// Op describes a set of file operations. -type Op uint32 - -// These are the generalized file operations that can trigger a notification. -const ( - Create Op = 1 << iota - Write - Remove - Rename - Chmod -) - -func (op Op) String() string { - // Use a buffer for efficient string concatenation - var buffer bytes.Buffer - - if op&Create == Create { - buffer.WriteString("|CREATE") - } - if op&Remove == Remove { - buffer.WriteString("|REMOVE") - } - if op&Write == Write { - buffer.WriteString("|WRITE") - } - if op&Rename == Rename { - buffer.WriteString("|RENAME") - } - if op&Chmod == Chmod { - buffer.WriteString("|CHMOD") - } - if buffer.Len() == 0 { - return "" - } - return buffer.String()[1:] // Strip leading pipe -} - -// String returns a string representation of the event in the form -// "file: REMOVE|WRITE|..." -func (e Event) String() string { - return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) -} - -// Common errors that can be reported by a watcher -var ( - ErrEventOverflow = errors.New("fsnotify queue overflow") -) diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify.go deleted file mode 100644 index d9fd1b88a05..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify.go +++ /dev/null @@ -1,337 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "unsafe" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - mu sync.Mutex // Map access - fd int - poller *fdPoller - watches map[string]*watch // Map of inotify watches (key: path) - paths map[int]string // Map of watched paths (key: watch descriptor) - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneResp chan struct{} // Channel to respond to Close -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. 
-func NewWatcher() (*Watcher, error) { - // Create inotify fd - fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) - if fd == -1 { - return nil, errno - } - // Create epoll - poller, err := newFdPoller(fd) - if err != nil { - unix.Close(fd) - return nil, err - } - w := &Watcher{ - fd: fd, - poller: poller, - watches: make(map[string]*watch), - paths: make(map[int]string), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - doneResp: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -func (w *Watcher) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed() { - return nil - } - - // Send 'close' signal to goroutine, and set the Watcher to closed. - close(w.done) - - // Wake up goroutine - w.poller.wake() - - // Wait for goroutine to close - <-w.doneResp - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - name = filepath.Clean(name) - if w.isClosed() { - return errors.New("inotify instance already closed") - } - - const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF - - var flags uint32 = agnosticEvents - - w.mu.Lock() - defer w.mu.Unlock() - watchEntry := w.watches[name] - if watchEntry != nil { - flags |= watchEntry.flags | unix.IN_MASK_ADD - } - wd, errno := unix.InotifyAddWatch(w.fd, name, flags) - if wd == -1 { - return errno - } - - if watchEntry == nil { - w.watches[name] = &watch{wd: uint32(wd), flags: flags} - w.paths[wd] = name - } else { - watchEntry.wd = uint32(wd) - watchEntry.flags = flags - } - - return nil -} - -// Remove stops watching the named file or directory (non-recursively). -func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - - // Fetch the watch. - w.mu.Lock() - defer w.mu.Unlock() - watch, ok := w.watches[name] - - // Remove it from inotify. - if !ok { - return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) - } - - // We successfully removed the watch if InotifyRmWatch doesn't return an - // error, we need to clean up our internal state to ensure it matches - // inotify's kernel state. - delete(w.paths, int(watch.wd)) - delete(w.watches, name) - - // inotify_rm_watch will return EINVAL if the file has been deleted; - // the inotify will already have been removed. - // watches and pathes are deleted in ignoreLinux() implicitly and asynchronously - // by calling inotify_rm_watch() below. e.g. readEvents() goroutine receives IN_IGNORE - // so that EINVAL means that the wd is being rm_watch()ed or its file removed - // by another thread and we have not received IN_IGNORE event. - success, errno := unix.InotifyRmWatch(w.fd, watch.wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case. - // the only two possible errors are: - // EBADF, which happens when w.fd is not a valid file descriptor of any kind. - // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. - // Watch descriptors are invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. 
- return errno - } - - return nil -} - -type watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) -} - -// readEvents reads from the inotify file descriptor, converts the -// received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - n int // Number of bytes read with read() - errno error // Syscall errno - ok bool // For poller.wait - ) - - defer close(w.doneResp) - defer close(w.Errors) - defer close(w.Events) - defer unix.Close(w.fd) - defer w.poller.close() - - for { - // See if we have been closed. - if w.isClosed() { - return - } - - ok, errno = w.poller.wait() - if errno != nil { - select { - case w.Errors <- errno: - case <-w.done: - return - } - continue - } - - if !ok { - continue - } - - n, errno = unix.Read(w.fd, buf[:]) - // If a signal interrupted execution, see if we've been asked to close, and try again. - // http://man7.org/linux/man-pages/man7/signal.7.html : - // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" - if errno == unix.EINTR { - continue - } - - // unix.Read might have been woken up by Close. If so, we're done. - if w.isClosed() { - return - } - - if n < unix.SizeofInotifyEvent { - var err error - if n == 0 { - // If EOF is received. This should really never happen. - err = io.EOF - } else if n < 0 { - // If an error occurred while reading. - err = errno - } else { - // Read was too short. - err = errors.New("notify: short read in readEvents()") - } - select { - case w.Errors <- err: - case <-w.done: - return - } - continue - } - - var offset uint32 - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... - for offset <= uint32(n-unix.SizeofInotifyEvent) { - // Point "raw" to the event in the buffer - raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - - mask := uint32(raw.Mask) - nameLen := uint32(raw.Len) - - if mask&unix.IN_Q_OVERFLOW != 0 { - select { - case w.Errors <- ErrEventOverflow: - case <-w.done: - return - } - } - - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. - w.mu.Lock() - name, ok := w.paths[int(raw.Wd)] - // IN_DELETE_SELF occurs when the file/directory being watched is removed. - // This is a sign to clean up the maps, otherwise we are no longer in sync - // with the inotify kernel state which has already deleted the watch - // automatically. - if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - delete(w.paths, int(raw.Wd)) - delete(w.watches, name) - } - w.mu.Unlock() - - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) - // The filename is padded with NULL bytes. TrimRight() gets rid of those. 
- name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - event := newEvent(name, mask) - - // Send the events that are not ignored on the events channel - if !event.ignoreLinux(mask) { - select { - case w.Events <- event: - case <-w.done: - return - } - } - - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen - } - } -} - -// Certain types of events can be "ignored" and not sent over the Events -// channel. Such as events marked ignore by the kernel, or MODIFY events -// against files that do not exist. -func (e *Event) ignoreLinux(mask uint32) bool { - // Ignore anything the inotify API says to ignore - if mask&unix.IN_IGNORED == unix.IN_IGNORED { - return true - } - - // If the event is not a DELETE or RENAME, the file must exist. - // Otherwise the event is ignored. - // *Note*: this was put in place because it was seen that a MODIFY - // event was sent after the DELETE. This ignores that MODIFY and - // assumes a DELETE will come or has come if the file doesn't exist. - if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { - _, statErr := os.Lstat(e.Name) - return os.IsNotExist(statErr) - } - return false -} - -// newEvent returns an platform-independent Event based on an inotify mask. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { - e.Op |= Create - } - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { - e.Op |= Remove - } - if mask&unix.IN_MODIFY == unix.IN_MODIFY { - e.Op |= Write - } - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { - e.Op |= Rename - } - if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { - e.Op |= Chmod - } - return e -} diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify_poller.go deleted file mode 100644 index b33f2b4d4b7..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/inotify_poller.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux - -package fsnotify - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -type fdPoller struct { - fd int // File descriptor (as returned by the inotify_init() syscall) - epfd int // Epoll file descriptor - pipe [2]int // Pipe for waking up -} - -func emptyPoller(fd int) *fdPoller { - poller := new(fdPoller) - poller.fd = fd - poller.epfd = -1 - poller.pipe[0] = -1 - poller.pipe[1] = -1 - return poller -} - -// Create a new inotify poller. -// This creates an inotify handler, and an epoll handler. -func newFdPoller(fd int) (*fdPoller, error) { - var errno error - poller := emptyPoller(fd) - defer func() { - if errno != nil { - poller.close() - } - }() - poller.fd = fd - - // Create epoll fd - poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if poller.epfd == -1 { - return nil, errno - } - // Create pipe; pipe[0] is the read end, pipe[1] the write end. 
- errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC) - if errno != nil { - return nil, errno - } - - // Register inotify fd with epoll - event := unix.EpollEvent{ - Fd: int32(poller.fd), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) - if errno != nil { - return nil, errno - } - - // Register pipe fd with epoll - event = unix.EpollEvent{ - Fd: int32(poller.pipe[0]), - Events: unix.EPOLLIN, - } - errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) - if errno != nil { - return nil, errno - } - - return poller, nil -} - -// Wait using epoll. -// Returns true if something is ready to be read, -// false if there is not. -func (poller *fdPoller) wait() (bool, error) { - // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. - // I don't know whether epoll_wait returns the number of events returned, - // or the total number of events ready. - // I decided to catch both by making the buffer one larger than the maximum. - events := make([]unix.EpollEvent, 7) - for { - n, errno := unix.EpollWait(poller.epfd, events, -1) - if n == -1 { - if errno == unix.EINTR { - continue - } - return false, errno - } - if n == 0 { - // If there are no events, try again. - continue - } - if n > 6 { - // This should never happen. More events were returned than should be possible. - return false, errors.New("epoll_wait returned more events than I know what to do with") - } - ready := events[:n] - epollhup := false - epollerr := false - epollin := false - for _, event := range ready { - if event.Fd == int32(poller.fd) { - if event.Events&unix.EPOLLHUP != 0 { - // This should not happen, but if it does, treat it as a wakeup. - epollhup = true - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the file descriptor, we should pretend - // something is ready to read, and let unix.Read pick up the error. - epollerr = true - } - if event.Events&unix.EPOLLIN != 0 { - // There is data to read. - epollin = true - } - } - if event.Fd == int32(poller.pipe[0]) { - if event.Events&unix.EPOLLHUP != 0 { - // Write pipe descriptor was closed, by us. This means we're closing down the - // watcher, and we should wake up. - } - if event.Events&unix.EPOLLERR != 0 { - // If an error is waiting on the pipe file descriptor. - // This is an absolute mystery, and should never ever happen. - return false, errors.New("Error on the pipe descriptor.") - } - if event.Events&unix.EPOLLIN != 0 { - // This is a regular wakeup, so we have to clear the buffer. - err := poller.clearWake() - if err != nil { - return false, err - } - } - } - } - - if epollhup || epollerr || epollin { - return true, nil - } - return false, nil - } -} - -// Close the write end of the poller. -func (poller *fdPoller) wake() error { - buf := make([]byte, 1) - n, errno := unix.Write(poller.pipe[1], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is full, poller will wake. - return nil - } - return errno - } - return nil -} - -func (poller *fdPoller) clearWake() error { - // You have to be woken up a LOT in order to get to 100! - buf := make([]byte, 100) - n, errno := unix.Read(poller.pipe[0], buf) - if n == -1 { - if errno == unix.EAGAIN { - // Buffer is empty, someone else cleared our wake. - return nil - } - return errno - } - return nil -} - -// Close all poller file descriptors, but not the one passed to it. 
-func (poller *fdPoller) close() { - if poller.pipe[1] != -1 { - unix.Close(poller.pipe[1]) - } - if poller.pipe[0] != -1 { - unix.Close(poller.pipe[0]) - } - if poller.epfd != -1 { - unix.Close(poller.epfd) - } -} diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/kqueue.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/kqueue.go deleted file mode 100644 index 86e76a3d676..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/kqueue.go +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly darwin - -package fsnotify - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "time" - - "golang.org/x/sys/unix" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - - kq int // File descriptor (as returned by the kqueue() syscall). - - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Map of watched file descriptors (key: path). - externalWatches map[string]bool // Map of watches added by user of the library. - dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. - paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. - fileExists map[string]bool // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called -} - -type pathInfo struct { - name string - isDir bool -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - kq, err := kqueue() - if err != nil { - return nil, err - } - - w := &Watcher{ - kq: kq, - watches: make(map[string]int), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]bool), - externalWatches: make(map[string]bool), - Events: make(chan Event), - Errors: make(chan error), - done: make(chan struct{}), - } - - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return nil - } - w.isClosed = true - - // copy paths to remove while locked - var pathsToRemove = make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() - // unlock before calling Remove, which also locks - - for _, name := range pathsToRemove { - w.Remove(name) - } - - // send a "quit" message to the reader goroutine - close(w.done) - - return nil -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - w.mu.Lock() - w.externalWatches[name] = true - w.mu.Unlock() - _, err := w.addWatch(name, noteAllEvents) - return err -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - name = filepath.Clean(name) - w.mu.Lock() - watchfd, ok := w.watches[name] - w.mu.Unlock() - if !ok { - return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) - } - - const registerRemove = unix.EV_DELETE - if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { - return err - } - - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - w.mu.Unlock() - - // Find all watched paths that are in this directory that are not external. - if isDir { - var pathsToRemove []string - w.mu.Lock() - for _, path := range w.paths { - wdir, _ := filepath.Split(path.name) - if filepath.Clean(wdir) == name { - if !w.externalWatches[path.name] { - pathsToRemove = append(pathsToRemove, path.name) - } - } - } - w.mu.Unlock() - for _, name := range pathsToRemove { - // Since these are internal, not much sense in propagating error - // to the user, as that will just confuse them with an error about - // a path they did not explicitly watch themselves. - w.Remove(name) - } - } - - return nil -} - -// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) -const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME - -// keventWaitTime to block on each read from kevent -var keventWaitTime = durationToTimespec(100 * time.Millisecond) - -// addWatch adds name to the watched file set. -// The flags are interpreted as described in kevent(2). -// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - // Make ./name and name equivalent - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() - return "", errors.New("kevent instance already closed") - } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() - - if !alreadyWatching { - fi, err := os.Lstat(name) - if err != nil { - return "", err - } - - // Don't watch sockets. - if fi.Mode()&os.ModeSocket == os.ModeSocket { - return "", nil - } - - // Don't watch named pipes. - if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { - return "", nil - } - - // Follow Symlinks - // Unfortunately, Linux can add bogus symlinks to watch list without - // issue, and Windows can't do symlinks period (AFAIK). To maintain - // consistency, we will act like everything is fine. There will simply - // be no file events for broken symlinks. - // Hence the returns of nil on errors. 
- if fi.Mode()&os.ModeSymlink == os.ModeSymlink { - name, err = filepath.EvalSymlinks(name) - if err != nil { - return "", nil - } - - w.mu.Lock() - _, alreadyWatching = w.watches[name] - w.mu.Unlock() - - if alreadyWatching { - return name, nil - } - - fi, err = os.Lstat(name) - if err != nil { - return "", nil - } - } - - watchfd, err = unix.Open(name, openMode, 0700) - if watchfd == -1 { - return "", err - } - - isDir = fi.IsDir() - } - - const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE - if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { - unix.Close(watchfd) - return "", err - } - - if !alreadyWatching { - w.mu.Lock() - w.watches[name] = watchfd - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() - } - - if isDir { - // Watch the directory if it has not been watched before, - // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - - watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() - - if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { - return "", err - } - } - } - return name, nil -} - -// readEvents reads from kqueue and converts the received kevents into -// Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { - eventBuffer := make([]unix.Kevent_t, 10) - -loop: - for { - // See if there is a message on the "done" channel - select { - case <-w.done: - break loop - default: - } - - // Get new events - kevents, err := read(w.kq, eventBuffer, &keventWaitTime) - // EINTR is okay, the syscall was interrupted before timeout expired. - if err != nil && err != unix.EINTR { - select { - case w.Errors <- err: - case <-w.done: - break loop - } - continue - } - - // Flush the events we received to the Events channel - for len(kevents) > 0 { - kevent := &kevents[0] - watchfd := int(kevent.Ident) - mask := uint32(kevent.Fflags) - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() - event := newEvent(path.name, mask) - - if path.isDir && !(event.Op&Remove == Remove) { - // Double check to make sure the directory exists. This can happen when - // we do a rm -fr on a recursively watched folders and we receive a - // modification event first but the folder has been deleted and later - // receive the delete event - if _, err := os.Lstat(event.Name); os.IsNotExist(err) { - // mark is as delete event - event.Op |= Remove - } - } - - if event.Op&Rename == Rename || event.Op&Remove == Remove { - w.Remove(event.Name) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() - } - - if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - // Send the event on the Events channel. - select { - case w.Events <- event: - case <-w.done: - break loop - } - } - - if event.Op&Remove == Remove { - // Look for a file that may have overwritten this. - // For example, mv f1 f2 will delete f2, then create f2. - if path.isDir { - fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() - if found { - // make sure the directory exists before we watch for changes. 
When we - // do a recursive watch and perform rm -fr, the parent directory might - // have gone missing, ignore the missing directory and let the - // upcoming delete event remove the watch from the parent directory. - if _, err := os.Lstat(fileDir); err == nil { - w.sendDirectoryChangeEvents(fileDir) - } - } - } else { - filePath := filepath.Clean(event.Name) - if fileInfo, err := os.Lstat(filePath); err == nil { - w.sendFileCreatedEventIfNew(filePath, fileInfo) - } - } - } - - // Move to next event - kevents = kevents[1:] - } - } - - // cleanup - err := unix.Close(w.kq) - if err != nil { - // only way the previous loop breaks is if w.done was closed so we need to async send to w.Errors. - select { - case w.Errors <- err: - default: - } - } - close(w.Events) - close(w.Errors) -} - -// newEvent returns an platform-independent Event based on kqueue Fflags. -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { - e.Op |= Remove - } - if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { - e.Op |= Write - } - if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { - e.Op |= Rename - } - if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { - e.Op |= Chmod - } - return e -} - -func newCreateEvent(name string) Event { - return Event{Name: name, Op: Create} -} - -// watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - return err - } - - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - } - - return nil -} - -// sendDirectoryEvents searches the directory for newly created files -// and sends them over the event channel. This functionality is to have -// the BSD version of fsnotify match Linux inotify which provides a -// create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { - // Get all files - files, err := ioutil.ReadDir(dirPath) - if err != nil { - select { - case w.Errors <- err: - case <-w.done: - return - } - } - - // Search for new files - for _, fileInfo := range files { - filePath := filepath.Join(dirPath, fileInfo.Name()) - err := w.sendFileCreatedEventIfNew(filePath, fileInfo) - - if err != nil { - return - } - } -} - -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. 
-func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - // Send create event - select { - case w.Events <- newCreateEvent(filePath): - case <-w.done: - return - } - } - - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fileInfo) - if err != nil { - return err - } - - w.mu.Lock() - w.fileExists[filePath] = true - w.mu.Unlock() - - return nil -} - -func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { - if fileInfo.IsDir() { - // mimic Linux providing delete events for subdirectories - // but preserve the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) - } - - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) -} - -// kqueue creates a new kernel event queue and returns a descriptor. -func kqueue() (kq int, err error) { - kq, err = unix.Kqueue() - if kq == -1 { - return kq, err - } - return kq, nil -} - -// register events with the queue -func register(kq int, fds []int, flags int, fflags uint32) error { - changes := make([]unix.Kevent_t, len(fds)) - - for i, fd := range fds { - // SetKevent converts int to the platform-specific types: - unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) - changes[i].Fflags = fflags - } - - // register the events - success, err := unix.Kevent(kq, changes, nil, nil) - if success == -1 { - return err - } - return nil -} - -// read retrieves pending events, or waits until an event occurs. -// A timeout of nil blocks indefinitely, while 0 polls the queue. -func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { - n, err := unix.Kevent(kq, nil, events, timeout) - if err != nil { - return nil, err - } - return events[0:n], nil -} - -// durationToTimespec prepares a timeout value -func durationToTimespec(d time.Duration) unix.Timespec { - return unix.NsecToTimespec(d.Nanoseconds()) -} diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go deleted file mode 100644 index 2306c4620bf..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build freebsd openbsd netbsd dragonfly - -package fsnotify - -import "golang.org/x/sys/unix" - -const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go deleted file mode 100644 index 870c4d6d184..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin - -package fsnotify - -import "golang.org/x/sys/unix" - -// note: this constant is not defined on BSD -const openMode = unix.O_EVTONLY | unix.O_CLOEXEC diff --git a/ui/wasm/vendor/github.com/fsnotify/fsnotify/windows.go b/ui/wasm/vendor/github.com/fsnotify/fsnotify/windows.go deleted file mode 100644 index 09436f31d82..00000000000 --- a/ui/wasm/vendor/github.com/fsnotify/fsnotify/windows.go +++ /dev/null @@ -1,561 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package fsnotify - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sync" - "syscall" - "unsafe" -) - -// Watcher watches a set of files, delivering events to a channel. -type Watcher struct { - Events chan Event - Errors chan error - isClosed bool // Set to true when Close() is first called - mu sync.Mutex // Map access - port syscall.Handle // Handle to completion port - watches watchMap // Map of watches (key: i-number) - input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error -} - -// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. -func NewWatcher() (*Watcher, error) { - port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) - if e != nil { - return nil, os.NewSyscallError("CreateIoCompletionPort", e) - } - w := &Watcher{ - port: port, - watches: make(watchMap), - input: make(chan *input, 1), - Events: make(chan Event, 50), - Errors: make(chan error), - quit: make(chan chan<- error, 1), - } - go w.readEvents() - return w, nil -} - -// Close removes all watches and closes the events channel. -func (w *Watcher) Close() error { - if w.isClosed { - return nil - } - w.isClosed = true - - // Send "quit" message to the reader goroutine - ch := make(chan error) - w.quit <- ch - if err := w.wakeupReader(); err != nil { - return err - } - return <-ch -} - -// Add starts watching the named file or directory (non-recursively). -func (w *Watcher) Add(name string) error { - if w.isClosed { - return errors.New("watcher already closed") - } - in := &input{ - op: opAddWatch, - path: filepath.Clean(name), - flags: sysFSALLEVENTS, - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -// Remove stops watching the the named file or directory (non-recursively). 
-func (w *Watcher) Remove(name string) error { - in := &input{ - op: opRemoveWatch, - path: filepath.Clean(name), - reply: make(chan error), - } - w.input <- in - if err := w.wakeupReader(); err != nil { - return err - } - return <-in.reply -} - -const ( - // Options for AddWatch - sysFSONESHOT = 0x80000000 - sysFSONLYDIR = 0x1000000 - - // Events - sysFSACCESS = 0x1 - sysFSALLEVENTS = 0xfff - sysFSATTRIB = 0x4 - sysFSCLOSE = 0x18 - sysFSCREATE = 0x100 - sysFSDELETE = 0x200 - sysFSDELETESELF = 0x400 - sysFSMODIFY = 0x2 - sysFSMOVE = 0xc0 - sysFSMOVEDFROM = 0x40 - sysFSMOVEDTO = 0x80 - sysFSMOVESELF = 0x800 - - // Special events - sysFSIGNORED = 0x8000 - sysFSQOVERFLOW = 0x4000 -) - -func newEvent(name string, mask uint32) Event { - e := Event{Name: name} - if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { - e.Op |= Create - } - if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { - e.Op |= Remove - } - if mask&sysFSMODIFY == sysFSMODIFY { - e.Op |= Write - } - if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { - e.Op |= Rename - } - if mask&sysFSATTRIB == sysFSATTRIB { - e.Op |= Chmod - } - return e -} - -const ( - opAddWatch = iota - opRemoveWatch -) - -const ( - provisional uint64 = 1 << (32 + iota) -) - -type input struct { - op int - path string - flags uint32 - reply chan error -} - -type inode struct { - handle syscall.Handle - volume uint32 - index uint64 -} - -type watch struct { - ov syscall.Overlapped - ino *inode // i-number - path string // Directory path - mask uint64 // Directory itself is being watched with these notify flags - names map[string]uint64 // Map of names being watched and their notify flags - rename string // Remembers the old name while renaming a file - buf [4096]byte -} - -type indexMap map[uint64]*watch -type watchMap map[uint32]indexMap - -func (w *Watcher) wakeupReader() error { - e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) - if e != nil { - return os.NewSyscallError("PostQueuedCompletionStatus", e) - } - return nil -} - -func getDir(pathname string) (dir string, err error) { - attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) - if e != nil { - return "", os.NewSyscallError("GetFileAttributes", e) - } - if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - dir = pathname - } else { - dir, _ = filepath.Split(pathname) - dir = filepath.Clean(dir) - } - return -} - -func getIno(path string) (ino *inode, err error) { - h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), - syscall.FILE_LIST_DIRECTORY, - syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, - nil, syscall.OPEN_EXISTING, - syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) - if e != nil { - return nil, os.NewSyscallError("CreateFile", e) - } - var fi syscall.ByHandleFileInformation - if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { - syscall.CloseHandle(h) - return nil, os.NewSyscallError("GetFileInformationByHandle", e) - } - ino = &inode{ - handle: h, - volume: fi.VolumeSerialNumber, - index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), - } - return ino, nil -} - -// Must run within the I/O thread. -func (m watchMap) get(ino *inode) *watch { - if i := m[ino.volume]; i != nil { - return i[ino.index] - } - return nil -} - -// Must run within the I/O thread. 
-func (m watchMap) set(ino *inode, watch *watch) { - i := m[ino.volume] - if i == nil { - i = make(indexMap) - m[ino.volume] = i - } - i[ino.index] = watch -} - -// Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - if flags&sysFSONLYDIR != 0 && pathname != dir { - return nil - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watchEntry := w.watches.get(ino) - w.mu.Unlock() - if watchEntry == nil { - if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { - syscall.CloseHandle(ino.handle) - return os.NewSyscallError("CreateIoCompletionPort", e) - } - watchEntry = &watch{ - ino: ino, - path: dir, - names: make(map[string]uint64), - } - w.mu.Lock() - w.watches.set(ino, watchEntry) - w.mu.Unlock() - flags |= provisional - } else { - syscall.CloseHandle(ino.handle) - } - if pathname == dir { - watchEntry.mask |= flags - } else { - watchEntry.names[filepath.Base(pathname)] |= flags - } - if err = w.startRead(watchEntry); err != nil { - return err - } - if pathname == dir { - watchEntry.mask &= ^provisional - } else { - watchEntry.names[filepath.Base(pathname)] &= ^provisional - } - return nil -} - -// Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { - dir, err := getDir(pathname) - if err != nil { - return err - } - ino, err := getIno(dir) - if err != nil { - return err - } - w.mu.Lock() - watch := w.watches.get(ino) - w.mu.Unlock() - if watch == nil { - return fmt.Errorf("can't remove non-existent watch for: %s", pathname) - } - if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - watch.mask = 0 - } else { - name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - return w.startRead(watch) -} - -// Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { - for name, mask := range watch.names { - if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) - } - delete(watch.names, name) - } - if watch.mask != 0 { - if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) - } - watch.mask = 0 - } -} - -// Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { - if e := syscall.CancelIo(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CancelIo", e) - w.deleteWatch(watch) - } - mask := toWindowsFlags(watch.mask) - for _, m := range watch.names { - mask |= toWindowsFlags(m) - } - if mask == 0 { - if e := syscall.CloseHandle(watch.ino.handle); e != nil { - w.Errors <- os.NewSyscallError("CloseHandle", e) - } - w.mu.Lock() - delete(w.watches[watch.ino.volume], watch.ino.index) - w.mu.Unlock() - return nil - } - e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], - uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) - if e != nil { - err := os.NewSyscallError("ReadDirectoryChanges", e) - if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { - // Watched directory was probably removed - if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - err = nil - } - w.deleteWatch(watch) - w.startRead(watch) - return err - } - return nil -} - -// readEvents reads from the I/O completion port, converts the -// received events into Event objects and sends them via the Events channel. -// Entry point to the I/O thread. -func (w *Watcher) readEvents() { - var ( - n, key uint32 - ov *syscall.Overlapped - ) - runtime.LockOSThread() - - for { - e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) - watch := (*watch)(unsafe.Pointer(ov)) - - if watch == nil { - select { - case ch := <-w.quit: - w.mu.Lock() - var indexes []indexMap - for _, index := range w.watches { - indexes = append(indexes, index) - } - w.mu.Unlock() - for _, index := range indexes { - for _, watch := range index { - w.deleteWatch(watch) - w.startRead(watch) - } - } - var err error - if e := syscall.CloseHandle(w.port); e != nil { - err = os.NewSyscallError("CloseHandle", e) - } - close(w.Events) - close(w.Errors) - ch <- err - return - case in := <-w.input: - switch in.op { - case opAddWatch: - in.reply <- w.addWatch(in.path, uint64(in.flags)) - case opRemoveWatch: - in.reply <- w.remWatch(in.path) - } - default: - } - continue - } - - switch e { - case syscall.ERROR_MORE_DATA: - if watch == nil { - w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") - } else { - // The i/o succeeded but the buffer is full. - // In theory we should be building up a full packet. - // In practice we can get away with just carrying on. 
- n = uint32(unsafe.Sizeof(watch.buf)) - } - case syscall.ERROR_ACCESS_DENIED: - // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) - w.deleteWatch(watch) - w.startRead(watch) - continue - case syscall.ERROR_OPERATION_ABORTED: - // CancelIo was called on this handle - continue - default: - w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) - continue - case nil: - } - - var offset uint32 - for { - if n == 0 { - w.Events <- newEvent("", sysFSQOVERFLOW) - w.Errors <- errors.New("short read in readEvents()") - break - } - - // Point "raw" to the event in the buffer - raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) - buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) - name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) - fullname := filepath.Join(watch.path, name) - - var mask uint64 - switch raw.Action { - case syscall.FILE_ACTION_REMOVED: - mask = sysFSDELETESELF - case syscall.FILE_ACTION_MODIFIED: - mask = sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - watch.rename = name - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - if watch.names[watch.rename] != 0 { - watch.names[name] |= watch.names[watch.rename] - delete(watch.names, watch.rename) - mask = sysFSMOVESELF - } - } - - sendNameEvent := func() { - if w.sendEvent(fullname, watch.names[name]&mask) { - if watch.names[name]&sysFSONESHOT != 0 { - delete(watch.names, name) - } - } - } - if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() - } - if raw.Action == syscall.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) - delete(watch.names, name) - } - if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { - if watch.mask&sysFSONESHOT != 0 { - watch.mask = 0 - } - } - if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() - } - - // Move to the next event in the buffer - if raw.NextEntryOffset == 0 { - break - } - offset += raw.NextEntryOffset - - // Error! 
- if offset >= n { - w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") - break - } - } - - if err := w.startRead(watch); err != nil { - w.Errors <- err - } - } -} - -func (w *Watcher) sendEvent(name string, mask uint64) bool { - if mask == 0 { - return false - } - event := newEvent(name, uint32(mask)) - select { - case ch := <-w.quit: - w.quit <- ch - case w.Events <- event: - } - return true -} - -func toWindowsFlags(mask uint64) uint32 { - var m uint32 - if mask&sysFSACCESS != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS - } - if mask&sysFSMODIFY != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE - } - if mask&sysFSATTRIB != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES - } - if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { - m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME - } - return m -} - -func toFSnotifyFlags(action uint32) uint64 { - switch action { - case syscall.FILE_ACTION_ADDED: - return sysFSCREATE - case syscall.FILE_ACTION_REMOVED: - return sysFSDELETE - case syscall.FILE_ACTION_MODIFIED: - return sysFSMODIFY - case syscall.FILE_ACTION_RENAMED_OLD_NAME: - return sysFSMOVEDFROM - case syscall.FILE_ACTION_RENAMED_NEW_NAME: - return sysFSMOVEDTO - } - return 0 -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/LICENSE b/ui/wasm/vendor/github.com/go-git/gcfg/LICENSE deleted file mode 100644 index 87a5cede339..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go -Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/README b/ui/wasm/vendor/github.com/go-git/gcfg/README deleted file mode 100644 index 1ff233a529d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/README +++ /dev/null @@ -1,4 +0,0 @@ -Gcfg reads INI-style configuration files into Go structs; -supports user-defined types and subsections. 
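As a quick, hedged orientation before the full package documentation below: a minimal sketch of that struct mapping might look like the following. The `[server]` section and its fields are invented purely for illustration; `ReadStringInto` is one of the package's real `Read*Into` entry points (alongside `ReadFileInto` and `ReadInto`).

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg"
)

func main() {
	// Each struct field names an INI [section]; each nested field
	// is a name=value variable inside that section, matched
	// case-insensitively (see the package documentation below).
	var cfg struct {
		Server struct { // hypothetical section, for illustration only
			Host string
			Port int
		}
	}

	ini := "[server]\nhost = example.com\nport = 8080\n"

	// ReadStringInto parses INI-style text into the struct;
	// ReadFileInto and ReadInto do the same for files and io.Readers.
	if err := gcfg.ReadStringInto(&cfg, ini); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Server.Host, cfg.Server.Port) // example.com 8080
}
```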
- -Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/doc.go b/ui/wasm/vendor/github.com/go-git/gcfg/doc.go deleted file mode 100644 index 7bdefbf0203..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/doc.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package gcfg reads "INI-style" text-based configuration files with -// "name=value" pairs grouped into sections (gcfg files). -// -// This package is still a work in progress; see the sections below for planned -// changes. -// -// Syntax -// -// The syntax is based on that used by git config: -// http://git-scm.com/docs/git-config#_syntax . -// There are some (planned) differences compared to the git config format: -// - improve data portability: -// - must be encoded in UTF-8 (for now) and must not contain the 0 byte -// - include and "path" type is not supported -// (path type may be implementable as a user-defined type) -// - internationalization -// - section and variable names can contain unicode letters, unicode digits -// (as defined in http://golang.org/ref/spec#Characters ) and hyphens -// (U+002D), starting with a unicode letter -// - disallow potentially ambiguous or misleading definitions: -// - `[sec.sub]` format is not allowed (deprecated in gitconfig) -// - `[sec ""]` is not allowed -// - use `[sec]` for section name "sec" and empty subsection name -// - (planned) within a single file, definitions must be contiguous for each: -// - section: '[secA]' -> '[secB]' -> '[secA]' is an error -// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error -// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error -// -// Data structure -// -// The functions in this package read values into a user-defined struct. -// Each section corresponds to a struct field in the config struct, and each -// variable in a section corresponds to a data field in the section struct. -// The mapping of each section or variable name to fields is done either based -// on the "gcfg" struct tag or by matching the name of the section or variable, -// ignoring case. In the latter case, hyphens '-' in section and variable names -// correspond to underscores '_' in field names. -// Fields must be exported; to use a section or variable name starting with a -// letter that is neither upper- or lower-case, prefix the field name with 'X'. -// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .) -// -// For sections with subsections, the corresponding field in config must be a -// map, rather than a struct, with string keys and pointer-to-struct values. -// Values for subsection variables are stored in the map with the subsection -// name used as the map key. -// (Note that unlike section and variable names, subsection names are case -// sensitive.) -// When using a map, and there is a section with the same section name but -// without a subsection name, its values are stored with the empty string used -// as the key. -// It is possible to provide default values for subsections in the section -// "default-" (or by setting values in the corresponding struct -// field "Default_"). -// -// The functions in this package panic if config is not a pointer to a struct, -// or when a field is not of a suitable type (either a struct or a map with -// string keys and pointer-to-struct values). -// -// Parsing of values -// -// The section structs in the config struct may contain single-valued or -// multi-valued variables. 
Variables of unnamed slice type (that is, a type -// starting with `[]`) are treated as multi-value; all others (including named -// slice types) are treated as single-valued variables. -// -// Single-valued variables are handled based on the type as follows. -// Unnamed pointer types (that is, types starting with `*`) are dereferenced, -// and if necessary, a new instance is allocated. -// -// For types implementing the encoding.TextUnmarshaler interface, the -// UnmarshalText method is used to set the value. Implementing this method is -// the recommended way for parsing user-defined types. -// -// For fields of string kind, the value string is assigned to the field, after -// unquoting and unescaping as needed. -// For fields of bool kind, the field is set to true if the value is "true", -// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or -// "0", ignoring case. In addition, single-valued bool fields can be specified -// with a "blank" value (variable name without equals sign and value); in such -// case the value is set to true. -// -// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as -// decimal or hexadecimal (if having '0x' prefix). (This is to prevent -// unintuitively handling zero-padded numbers as octal.) Other types having -// [u]int* as the underlying type, such as os.FileMode and uintptr allow -// decimal, hexadecimal, or octal values. -// Parsing mode for integer types can be overridden using the struct tag option -// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters -// (each standing for decimal, hexadecimal, and octal, respectively.) -// -// All other types are parsed using fmt.Sscanf with the "%v" verb. -// -// For multi-valued variables, each individual value is parsed as above and -// appended to the slice. If the first value is specified as a "blank" value -// (variable name without equals sign and value), a new slice is allocated; -// that is any values previously set in the slice will be ignored. -// -// The types subpackage for provides helpers for parsing "enum-like" and integer -// types. -// -// Error handling -// -// There are 3 types of errors: -// -// - programmer errors / panics: -// - invalid configuration structure -// - data errors: -// - fatal errors: -// - invalid configuration syntax -// - warnings: -// - data that doesn't belong to any part of the config structure -// -// Programmer errors trigger panics. These are should be fixed by the programmer -// before releasing code that uses gcfg. -// -// Data errors cause gcfg to return a non-nil error value. This includes the -// case when there are extra unknown key-value definitions in the configuration -// data (extra data). -// However, in some occasions it is desirable to be able to proceed in -// situations when the only data error is that of extra data. -// These errors are handled at a different (warning) priority and can be -// filtered out programmatically. To ignore extra data warnings, wrap the -// gcfg.Read*Into invocation into a call to gcfg.FatalOnly. -// -// TODO -// -// The following is a list of changes under consideration: -// - documentation -// - self-contained syntax documentation -// - more practical examples -// - move TODOs to issue tracker (eventually) -// - syntax -// - reconsider valid escape sequences -// (gitconfig doesn't support \r in value, \t in subsection name, etc.) 
-// - reading / parsing gcfg files -// - define internal representation structure -// - support multiple inputs (readers, strings, files) -// - support declaring encoding (?) -// - support varying fields sets for subsections (?) -// - writing gcfg files -// - error handling -// - make error context accessible programmatically? -// - limit input size? -// -package gcfg // import "github.com/go-git/gcfg" diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/errors.go b/ui/wasm/vendor/github.com/go-git/gcfg/errors.go deleted file mode 100644 index 853c76021de..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/errors.go +++ /dev/null @@ -1,41 +0,0 @@ -package gcfg - -import ( - "gopkg.in/warnings.v0" -) - -// FatalOnly filters the results of a Read*Into invocation and returns only -// fatal errors. That is, errors (warnings) indicating data for unknown -// sections / variables is ignored. Example invocation: -// -// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) -// if err != nil { -// ... -// -func FatalOnly(err error) error { - return warnings.FatalOnly(err) -} - -func isFatal(err error) bool { - _, ok := err.(extraData) - return !ok -} - -type extraData struct { - section string - subsection *string - variable *string -} - -func (e extraData) Error() string { - s := "can't store data at section \"" + e.section + "\"" - if e.subsection != nil { - s += ", subsection \"" + *e.subsection + "\"" - } - if e.variable != nil { - s += ", variable \"" + *e.variable + "\"" - } - return s -} - -var _ error = extraData{} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/go1_0.go b/ui/wasm/vendor/github.com/go-git/gcfg/go1_0.go deleted file mode 100644 index 6670210791d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/go1_0.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !go1.2 - -package gcfg - -type textUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/go1_2.go b/ui/wasm/vendor/github.com/go-git/gcfg/go1_2.go deleted file mode 100644 index 6f5843bc7cd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/go1_2.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.2 - -package gcfg - -import ( - "encoding" -) - -type textUnmarshaler encoding.TextUnmarshaler diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/read.go b/ui/wasm/vendor/github.com/go-git/gcfg/read.go deleted file mode 100644 index 4dfdc5cf301..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/read.go +++ /dev/null @@ -1,273 +0,0 @@ -package gcfg - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/go-git/gcfg/scanner" - "github.com/go-git/gcfg/token" - "gopkg.in/warnings.v0" -) - -var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'} - -// no error: invalid literals should be caught by scanner -func unquote(s string) string { - u, q, esc := make([]rune, 0, len(s)), false, false - for _, c := range s { - if esc { - uc, ok := unescape[c] - switch { - case ok: - u = append(u, uc) - fallthrough - case !q && c == '\n': - esc = false - continue - } - panic("invalid escape sequence") - } - switch c { - case '"': - q = !q - case '\\': - esc = true - default: - u = append(u, c) - } - } - if q { - panic("missing end quote") - } - if esc { - panic("invalid escape sequence") - } - return string(u) -} - -func read(c *warnings.Collector, callback func(string, string, string, string, bool) error, - fset *token.FileSet, file *token.File, src []byte) error { - // - var s scanner.Scanner - var errs scanner.ErrorList - 
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) - sect, sectsub := "", "" - pos, tok, lit := s.Scan() - errfn := func(msg string) error { - return fmt.Errorf("%s: %s", fset.Position(pos), msg) - } - for { - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - switch tok { - case token.EOF: - return nil - case token.EOL, token.COMMENT: - pos, tok, lit = s.Scan() - case token.LBRACK: - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.IDENT { - if err := c.Collect(errfn("expected section name")); err != nil { - return err - } - } - sect, sectsub = lit, "" - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok == token.STRING { - sectsub = unquote(lit) - if sectsub == "" { - if err := c.Collect(errfn("empty subsection name")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - } - if tok != token.RBRACK { - if sectsub == "" { - if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { - return err - } - } - if err := c.Collect(errfn("expected right bracket")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { - return err - } - } - // If a section/subsection header was found, ensure a - // container object is created, even if there are no - // variables further down. - err := c.Collect(callback(sect, sectsub, "", "", true)) - if err != nil { - return err - } - case token.IDENT: - if sect == "" { - if err := c.Collect(errfn("expected section header")); err != nil { - return err - } - } - n := lit - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - return errs.Err() - } - blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" - if !blank { - if tok != token.ASSIGN { - if err := c.Collect(errfn("expected '='")); err != nil { - return err - } - } - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.STRING { - if err := c.Collect(errfn("expected value")); err != nil { - return err - } - } - v = unquote(lit) - pos, tok, lit = s.Scan() - if errs.Len() > 0 { - if err := c.Collect(errs.Err()); err != nil { - return err - } - } - if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { - if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { - return err - } - } - } - err := c.Collect(callback(sect, sectsub, n, v, blank)) - if err != nil { - return err - } - default: - if sect == "" { - if err := c.Collect(errfn("expected section header")); err != nil { - return err - } - } - if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { - return err - } - } - } - panic("never reached") -} - -func readInto(config interface{}, fset *token.FileSet, file *token.File, - src []byte) error { - // - c := warnings.NewCollector(isFatal) - firstPassCallback := func(s string, ss string, k string, v string, bv bool) error { - return set(c, config, s, ss, k, v, bv, false) - } - err := read(c, firstPassCallback, fset, file, src) - if err != nil { - return err - } - secondPassCallback := func(s string, ss string, k string, v string, bv bool) error { - return 
set(c, config, s, ss, k, v, bv, true) - } - err = read(c, secondPassCallback, fset, file, src) - if err != nil { - return err - } - return c.Done() -} - -// ReadWithCallback reads gcfg formatted data from reader and calls -// callback with each section and option found. -// -// Callback is called with section, subsection, option key, option value -// and blank value flag as arguments. -// -// When a section is found, callback is called with nil subsection, option key -// and option value. -// -// When a subsection is found, callback is called with nil option key and -// option value. -// -// If blank value flag is true, it means that the value was not set for an option -// (as opposed to set to empty string). -// -// If callback returns an error, ReadWithCallback terminates with an error too. -func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error { - src, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - - fset := token.NewFileSet() - file := fset.AddFile("", fset.Base(), len(src)) - c := warnings.NewCollector(isFatal) - - return read(c, callback, fset, file, src) -} - -// ReadInto reads gcfg formatted data from reader and sets the values into the -// corresponding fields in config. -func ReadInto(config interface{}, reader io.Reader) error { - src, err := ioutil.ReadAll(reader) - if err != nil { - return err - } - fset := token.NewFileSet() - file := fset.AddFile("", fset.Base(), len(src)) - return readInto(config, fset, file, src) -} - -// ReadStringInto reads gcfg formatted data from str and sets the values into -// the corresponding fields in config. -func ReadStringInto(config interface{}, str string) error { - r := strings.NewReader(str) - return ReadInto(config, r) -} - -// ReadFileInto reads gcfg formatted data from the file filename and sets the -// values into the corresponding fields in config. -func ReadFileInto(config interface{}, filename string) error { - f, err := os.Open(filename) - if err != nil { - return err - } - defer f.Close() - src, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fset := token.NewFileSet() - file := fset.AddFile(filename, fset.Base(), len(src)) - return readInto(config, fset, file, src) -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/scanner/errors.go b/ui/wasm/vendor/github.com/go-git/gcfg/scanner/errors.go deleted file mode 100644 index a6e00f5c64e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/scanner/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package scanner - -import ( - "fmt" - "io" - "sort" -) - -import ( - "github.com/go-git/gcfg/token" -) - -// In an ErrorList, an error is represented by an *Error. -// The position Pos, if valid, points to the beginning of -// the offending token, and the error condition is described -// by Msg. -// -type Error struct { - Pos token.Position - Msg string -} - -// Error implements the error interface. -func (e Error) Error() string { - if e.Pos.Filename != "" || e.Pos.IsValid() { - // don't print "" - // TODO(gri) reconsider the semantics of Position.IsValid - return e.Pos.String() + ": " + e.Msg - } - return e.Msg -} - -// ErrorList is a list of *Errors. -// The zero value for an ErrorList is an empty ErrorList ready to use. -// -type ErrorList []*Error - -// Add adds an Error with given position and error message to an ErrorList. 
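The read.go hunk above also removes the callback-based reader. A hedged sketch of `ReadWithCallback`, which streams every section header and key/value definition to a function instead of populating a struct (the input is illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/go-git/gcfg"
)

func main() {
	src := `[remote "origin"]
url = https://example.com/repo.git
`
	// The callback receives section, subsection, key, value and a
	// blank-value flag for every definition in the input; section
	// headers arrive with an empty key and blank set to true.
	err := gcfg.ReadWithCallback(strings.NewReader(src),
		func(sect, subsect, key, value string, blank bool) error {
			fmt.Printf("sect=%q sub=%q key=%q val=%q blank=%v\n",
				sect, subsect, key, value, blank)
			return nil
		})
	if err != nil {
		fmt.Println("error:", err)
	}
}
```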
-func (p *ErrorList) Add(pos token.Position, msg string) { - *p = append(*p, &Error{pos, msg}) -} - -// Reset resets an ErrorList to no errors. -func (p *ErrorList) Reset() { *p = (*p)[0:0] } - -// ErrorList implements the sort Interface. -func (p ErrorList) Len() int { return len(p) } -func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func (p ErrorList) Less(i, j int) bool { - e := &p[i].Pos - f := &p[j].Pos - if e.Filename < f.Filename { - return true - } - if e.Filename == f.Filename { - return e.Offset < f.Offset - } - return false -} - -// Sort sorts an ErrorList. *Error entries are sorted by position, -// other errors are sorted by error message, and before any *Error -// entry. -// -func (p ErrorList) Sort() { - sort.Sort(p) -} - -// RemoveMultiples sorts an ErrorList and removes all but the first error per line. -func (p *ErrorList) RemoveMultiples() { - sort.Sort(p) - var last token.Position // initial last.Line is != any legal error line - i := 0 - for _, e := range *p { - if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { - last = e.Pos - (*p)[i] = e - i++ - } - } - (*p) = (*p)[0:i] -} - -// An ErrorList implements the error interface. -func (p ErrorList) Error() string { - switch len(p) { - case 0: - return "no errors" - case 1: - return p[0].Error() - } - return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) -} - -// Err returns an error equivalent to this error list. -// If the list is empty, Err returns nil. -func (p ErrorList) Err() error { - if len(p) == 0 { - return nil - } - return p -} - -// PrintError is a utility function that prints a list of errors to w, -// one error per line, if the err parameter is an ErrorList. Otherwise -// it prints the err string. -// -func PrintError(w io.Writer, err error) { - if list, ok := err.(ErrorList); ok { - for _, e := range list { - fmt.Fprintf(w, "%s\n", e) - } - } else if err != nil { - fmt.Fprintf(w, "%s\n", err) - } -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/scanner/scanner.go b/ui/wasm/vendor/github.com/go-git/gcfg/scanner/scanner.go deleted file mode 100644 index 41aafec7589..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/scanner/scanner.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package scanner implements a scanner for gcfg configuration text. -// It takes a []byte as source which can then be tokenized -// through repeated calls to the Scan method. -// -// Note that the API for the scanner package may change to accommodate new -// features or implementation changes in gcfg. -// -package scanner - -import ( - "fmt" - "path/filepath" - "unicode" - "unicode/utf8" -) - -import ( - "github.com/go-git/gcfg/token" -) - -// An ErrorHandler may be provided to Scanner.Init. If a syntax error is -// encountered and a handler was installed, the handler is called with a -// position and an error message. The position points to the beginning of -// the offending token. -// -type ErrorHandler func(pos token.Position, msg string) - -// A Scanner holds the scanner's internal state while processing -// a given text. It can be allocated as part of another data -// structure but must be initialized via Init before use. 
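The errors.go file deleted here is a trimmed copy of Go's go/scanner error list. A minimal sketch of the `ErrorList` pattern it implements — the file name and messages below are invented:

```go
package main

import (
	"os"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
)

func main() {
	var errs scanner.ErrorList
	errs.Add(token.Position{Filename: "app.gcfg", Line: 3, Column: 1}, "expected section name")
	errs.Add(token.Position{Filename: "app.gcfg", Line: 7, Column: 5}, "expected '='")

	// Err returns nil for an empty list, and the list itself otherwise.
	if err := errs.Err(); err != nil {
		scanner.PrintError(os.Stderr, err) // prints one error per line
	}
}
```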
-// -type Scanner struct { - // immutable state - file *token.File // source file handle - dir string // directory portion of file.Name() - src []byte // source - err ErrorHandler // error reporting; or nil - mode Mode // scanning mode - - // scanning state - ch rune // current character - offset int // character offset - rdOffset int // reading offset (position after current character) - lineOffset int // current line offset - nextVal bool // next token is expected to be a value - - // public state - ok to modify - ErrorCount int // number of errors encountered -} - -// Read the next Unicode char into s.ch. -// s.ch < 0 means end-of-file. -// -func (s *Scanner) next() { - if s.rdOffset < len(s.src) { - s.offset = s.rdOffset - if s.ch == '\n' { - s.lineOffset = s.offset - s.file.AddLine(s.offset) - } - r, w := rune(s.src[s.rdOffset]), 1 - switch { - case r == 0: - s.error(s.offset, "illegal character NUL") - case r >= 0x80: - // not ASCII - r, w = utf8.DecodeRune(s.src[s.rdOffset:]) - if r == utf8.RuneError && w == 1 { - s.error(s.offset, "illegal UTF-8 encoding") - } - } - s.rdOffset += w - s.ch = r - } else { - s.offset = len(s.src) - if s.ch == '\n' { - s.lineOffset = s.offset - s.file.AddLine(s.offset) - } - s.ch = -1 // eof - } -} - -// A mode value is a set of flags (or 0). -// They control scanner behavior. -// -type Mode uint - -const ( - ScanComments Mode = 1 << iota // return comments as COMMENT tokens -) - -// Init prepares the scanner s to tokenize the text src by setting the -// scanner at the beginning of src. The scanner uses the file set file -// for position information and it adds line information for each line. -// It is ok to re-use the same file when re-scanning the same file as -// line information which is already present is ignored. Init causes a -// panic if the file size does not match the src size. -// -// Calls to Scan will invoke the error handler err if they encounter a -// syntax error and err is not nil. Also, for each error encountered, -// the Scanner field ErrorCount is incremented by one. The mode parameter -// determines how comments are handled. -// -// Note that Init may call err if there is an error in the first character -// of the file. -// -func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { - // Explicitly initialize all fields since a scanner may be reused. 
- if file.Size() != len(src) { - panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) - } - s.file = file - s.dir, _ = filepath.Split(file.Name()) - s.src = src - s.err = err - s.mode = mode - - s.ch = ' ' - s.offset = 0 - s.rdOffset = 0 - s.lineOffset = 0 - s.ErrorCount = 0 - s.nextVal = false - - s.next() -} - -func (s *Scanner) error(offs int, msg string) { - if s.err != nil { - s.err(s.file.Position(s.file.Pos(offs)), msg) - } - s.ErrorCount++ -} - -func (s *Scanner) scanComment() string { - // initial [;#] already consumed - offs := s.offset - 1 // position of initial [;#] - - for s.ch != '\n' && s.ch >= 0 { - s.next() - } - return string(s.src[offs:s.offset]) -} - -func isLetter(ch rune) bool { - return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) -} - -func isDigit(ch rune) bool { - return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) -} - -func (s *Scanner) scanIdentifier() string { - offs := s.offset - for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { - s.next() - } - return string(s.src[offs:s.offset]) -} - -func (s *Scanner) scanEscape(val bool) { - offs := s.offset - ch := s.ch - s.next() // always make progress - switch ch { - case '\\', '"': - // ok - case 'n', 't', 'b': - if val { - break // ok - } - fallthrough - default: - s.error(offs, "unknown escape sequence") - } -} - -func (s *Scanner) scanString() string { - // '"' opening already consumed - offs := s.offset - 1 - - for s.ch != '"' { - ch := s.ch - s.next() - if ch == '\n' || ch < 0 { - s.error(offs, "string not terminated") - break - } - if ch == '\\' { - s.scanEscape(false) - } - } - - s.next() - - return string(s.src[offs:s.offset]) -} - -func stripCR(b []byte) []byte { - c := make([]byte, len(b)) - i := 0 - for _, ch := range b { - if ch != '\r' { - c[i] = ch - i++ - } - } - return c[:i] -} - -func (s *Scanner) scanValString() string { - offs := s.offset - - hasCR := false - end := offs - inQuote := false -loop: - for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { - ch := s.ch - s.next() - switch { - case inQuote && ch == '\\': - s.scanEscape(true) - case !inQuote && ch == '\\': - if s.ch == '\r' { - hasCR = true - s.next() - } - if s.ch != '\n' { - s.scanEscape(true) - } else { - s.next() - } - case ch == '"': - inQuote = !inQuote - case ch == '\r': - hasCR = true - case ch < 0 || inQuote && ch == '\n': - s.error(offs, "string not terminated") - break loop - } - if inQuote || !isWhiteSpace(ch) { - end = s.offset - } - } - - lit := s.src[offs:end] - if hasCR { - lit = stripCR(lit) - } - - return string(lit) -} - -func isWhiteSpace(ch rune) bool { - return ch == ' ' || ch == '\t' || ch == '\r' -} - -func (s *Scanner) skipWhitespace() { - for isWhiteSpace(s.ch) { - s.next() - } -} - -// Scan scans the next token and returns the token position, the token, -// and its literal string if applicable. The source end is indicated by -// token.EOF. -// -// If the returned token is a literal (token.IDENT, token.STRING) or -// token.COMMENT, the literal string has the corresponding value. -// -// If the returned token is token.ILLEGAL, the literal string is the -// offending character. -// -// In all other cases, Scan returns an empty literal string. -// -// For more tolerant parsing, Scan will return a valid token if -// possible even if a syntax error was encountered. Thus, even -// if the resulting token sequence contains no illegal tokens, -// a client may not assume that no error occurred. 
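Scan's contract (its documentation continues just below) is the usual tokenizer loop. A hedged sketch of driving it, using only `Init`, `Scan`, and the token package from this same vendor tree; the file name and input are invented:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/scanner"
	"github.com/go-git/gcfg/token"
)

func main() {
	src := []byte("[sec]\nkey = value\n")

	fset := token.NewFileSet()
	file := fset.AddFile("demo.gcfg", fset.Base(), len(src))

	var s scanner.Scanner
	// nil error handler: rely on s.ErrorCount instead of a callback.
	s.Init(file, src, nil, scanner.ScanComments)

	for {
		pos, tok, lit := s.Scan()
		fmt.Printf("%-14s %-8v %q\n", fset.Position(pos), tok, lit)
		if tok == token.EOF {
			break
		}
	}
}
```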
Instead it -// must check the scanner's ErrorCount or the number of calls -// of the error handler, if there was one installed. -// -// Scan adds line information to the file added to the file -// set with Init. Token positions are relative to that file -// and thus relative to the file set. -// -func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { -scanAgain: - s.skipWhitespace() - - // current token start - pos = s.file.Pos(s.offset) - - // determine token value - switch ch := s.ch; { - case s.nextVal: - lit = s.scanValString() - tok = token.STRING - s.nextVal = false - case isLetter(ch): - lit = s.scanIdentifier() - tok = token.IDENT - default: - s.next() // always make progress - switch ch { - case -1: - tok = token.EOF - case '\n': - tok = token.EOL - case '"': - tok = token.STRING - lit = s.scanString() - case '[': - tok = token.LBRACK - case ']': - tok = token.RBRACK - case ';', '#': - // comment - lit = s.scanComment() - if s.mode&ScanComments == 0 { - // skip comment - goto scanAgain - } - tok = token.COMMENT - case '=': - tok = token.ASSIGN - s.nextVal = true - default: - s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) - tok = token.ILLEGAL - lit = string(ch) - } - } - - return -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/set.go b/ui/wasm/vendor/github.com/go-git/gcfg/set.go deleted file mode 100644 index e2d9278025c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/set.go +++ /dev/null @@ -1,332 +0,0 @@ -package gcfg - -import ( - "bytes" - "encoding/gob" - "fmt" - "math/big" - "reflect" - "strings" - "unicode" - "unicode/utf8" - - "github.com/go-git/gcfg/types" - "gopkg.in/warnings.v0" -) - -type tag struct { - ident string - intMode string -} - -func newTag(ts string) tag { - t := tag{} - s := strings.Split(ts, ",") - t.ident = s[0] - for _, tse := range s[1:] { - if strings.HasPrefix(tse, "int=") { - t.intMode = tse[len("int="):] - } - } - return t -} - -func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { - var n string - r0, _ := utf8.DecodeRuneInString(name) - if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { - n = "X" - } - n += strings.Replace(name, "-", "_", -1) - f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { - if !v.FieldByName(fieldName).CanSet() { - return false - } - f, _ := v.Type().FieldByName(fieldName) - t := newTag(f.Tag.Get("gcfg")) - if t.ident != "" { - return strings.EqualFold(t.ident, name) - } - return strings.EqualFold(n, fieldName) - }) - if !ok { - return reflect.Value{}, tag{} - } - return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) -} - -type setter func(destp interface{}, blank bool, val string, t tag) error - -var errUnsupportedType = fmt.Errorf("unsupported type") -var errBlankUnsupported = fmt.Errorf("blank value not supported for type") - -var setters = []setter{ - typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, -} - -func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { - dtu, ok := d.(textUnmarshaler) - if !ok { - return errUnsupportedType - } - if blank { - return errBlankUnsupported - } - return dtu.UnmarshalText([]byte(val)) -} - -func boolSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) - return nil - } - b, err := types.ParseBool(val) - if err == nil { - reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) - } - return err -} - -func intMode(mode string) types.IntMode { - var m types.IntMode - if 
strings.ContainsAny(mode, "dD") { - m |= types.Dec - } - if strings.ContainsAny(mode, "hH") { - m |= types.Hex - } - if strings.ContainsAny(mode, "oO") { - m |= types.Oct - } - return m -} - -var typeModes = map[reflect.Type]types.IntMode{ - reflect.TypeOf(int(0)): types.Dec | types.Hex, - reflect.TypeOf(int8(0)): types.Dec | types.Hex, - reflect.TypeOf(int16(0)): types.Dec | types.Hex, - reflect.TypeOf(int32(0)): types.Dec | types.Hex, - reflect.TypeOf(int64(0)): types.Dec | types.Hex, - reflect.TypeOf(uint(0)): types.Dec | types.Hex, - reflect.TypeOf(uint8(0)): types.Dec | types.Hex, - reflect.TypeOf(uint16(0)): types.Dec | types.Hex, - reflect.TypeOf(uint32(0)): types.Dec | types.Hex, - reflect.TypeOf(uint64(0)): types.Dec | types.Hex, - // use default mode (allow dec/hex/oct) for uintptr type - reflect.TypeOf(big.Int{}): types.Dec | types.Hex, -} - -func intModeDefault(t reflect.Type) types.IntMode { - m, ok := typeModes[t] - if !ok { - m = types.Dec | types.Hex | types.Oct - } - return m -} - -func intSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - return errBlankUnsupported - } - mode := intMode(t.intMode) - if mode == 0 { - mode = intModeDefault(reflect.TypeOf(d).Elem()) - } - return types.ParseInt(d, val, mode) -} - -func stringSetter(d interface{}, blank bool, val string, t tag) error { - if blank { - return errBlankUnsupported - } - dsp, ok := d.(*string) - if !ok { - return errUnsupportedType - } - *dsp = val - return nil -} - -var kindSetters = map[reflect.Kind]setter{ - reflect.String: stringSetter, - reflect.Bool: boolSetter, - reflect.Int: intSetter, - reflect.Int8: intSetter, - reflect.Int16: intSetter, - reflect.Int32: intSetter, - reflect.Int64: intSetter, - reflect.Uint: intSetter, - reflect.Uint8: intSetter, - reflect.Uint16: intSetter, - reflect.Uint32: intSetter, - reflect.Uint64: intSetter, - reflect.Uintptr: intSetter, -} - -var typeSetters = map[reflect.Type]setter{ - reflect.TypeOf(big.Int{}): intSetter, -} - -func typeSetter(d interface{}, blank bool, val string, tt tag) error { - t := reflect.ValueOf(d).Type().Elem() - setter, ok := typeSetters[t] - if !ok { - return errUnsupportedType - } - return setter(d, blank, val, tt) -} - -func kindSetter(d interface{}, blank bool, val string, tt tag) error { - k := reflect.ValueOf(d).Type().Elem().Kind() - setter, ok := kindSetters[k] - if !ok { - return errUnsupportedType - } - return setter(d, blank, val, tt) -} - -func scanSetter(d interface{}, blank bool, val string, tt tag) error { - if blank { - return errBlankUnsupported - } - return types.ScanFully(d, val, 'v') -} - -func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, - vType reflect.Type) (reflect.Value, error) { - // - pv := reflect.New(vType) - dfltName := "default-" + sect - dfltField, _ := fieldFold(vCfg, dfltName) - var err error - if dfltField.IsValid() { - b := bytes.NewBuffer(nil) - ge := gob.NewEncoder(b) - if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { - return pv, err - } - gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) - if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { - return pv, err - } - } - return pv, nil -} - -func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, - value string, blankValue bool, subsectPass bool) error { - // - vPCfg := reflect.ValueOf(cfg) - if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("config must be a pointer to a struct")) - } - vCfg := vPCfg.Elem() - vSect, _ := fieldFold(vCfg, sect) - 
if !vSect.IsValid() { - err := extraData{section: sect} - return c.Collect(err) - } - isSubsect := vSect.Kind() == reflect.Map - if subsectPass != isSubsect { - return nil - } - if isSubsect { - vst := vSect.Type() - if vst.Key().Kind() != reflect.String || - vst.Elem().Kind() != reflect.Ptr || - vst.Elem().Elem().Kind() != reflect.Struct { - panic(fmt.Errorf("map field for section must have string keys and "+ - " pointer-to-struct values: section %q", sect)) - } - if vSect.IsNil() { - vSect.Set(reflect.MakeMap(vst)) - } - k := reflect.ValueOf(sub) - pv := vSect.MapIndex(k) - if !pv.IsValid() { - vType := vSect.Type().Elem().Elem() - var err error - if pv, err = newValue(c, sect, vCfg, vType); err != nil { - return err - } - vSect.SetMapIndex(k, pv) - } - vSect = pv.Elem() - } else if vSect.Kind() != reflect.Struct { - panic(fmt.Errorf("field for section must be a map or a struct: "+ - "section %q", sect)) - } else if sub != "" { - err := extraData{section: sect, subsection: &sub} - return c.Collect(err) - } - // Empty name is a special value, meaning that only the - // section/subsection object is to be created, with no values set. - if name == "" { - return nil - } - vVar, t := fieldFold(vSect, name) - if !vVar.IsValid() { - var err error - if isSubsect { - err = extraData{section: sect, subsection: &sub, variable: &name} - } else { - err = extraData{section: sect, variable: &name} - } - return c.Collect(err) - } - // vVal is either single-valued var, or newly allocated value within multi-valued var - var vVal reflect.Value - // multi-value if unnamed slice type - isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || - vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice - if isMulti && vVar.Kind() == reflect.Ptr { - if vVar.IsNil() { - vVar.Set(reflect.New(vVar.Type().Elem())) - } - vVar = vVar.Elem() - } - if isMulti && blankValue { - vVar.Set(reflect.Zero(vVar.Type())) - return nil - } - if isMulti { - vVal = reflect.New(vVar.Type().Elem()).Elem() - } else { - vVal = vVar - } - isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr - isNew := isDeref && vVal.IsNil() - // vAddr is address of value to set (dereferenced & allocated as needed) - var vAddr reflect.Value - switch { - case isNew: - vAddr = reflect.New(vVal.Type().Elem()) - case isDeref && !isNew: - vAddr = vVal - default: - vAddr = vVal.Addr() - } - vAddrI := vAddr.Interface() - err, ok := error(nil), false - for _, s := range setters { - err = s(vAddrI, blankValue, value, t) - if err == nil { - ok = true - break - } - if err != errUnsupportedType { - return err - } - } - if !ok { - // in case all setters returned errUnsupportedType - return err - } - if isNew { // set reference if it was dereferenced and newly allocated - vVal.Set(vAddr) - } - if isMulti { // append if multi-valued - vVar.Set(reflect.Append(vVar, vVal)) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/token/position.go b/ui/wasm/vendor/github.com/go-git/gcfg/token/position.go deleted file mode 100644 index fc45c1e7693..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/token/position.go +++ /dev/null @@ -1,435 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// TODO(gri) consider making this a separate package outside the go directory. 
- -package token - -import ( - "fmt" - "sort" - "sync" -) - -// ----------------------------------------------------------------------------- -// Positions - -// Position describes an arbitrary source position -// including the file, line, and column location. -// A Position is valid if the line number is > 0. -// -type Position struct { - Filename string // filename, if any - Offset int // offset, starting at 0 - Line int // line number, starting at 1 - Column int // column number, starting at 1 (character count) -} - -// IsValid returns true if the position is valid. -func (pos *Position) IsValid() bool { return pos.Line > 0 } - -// String returns a string in one of several forms: -// -// file:line:column valid position with file name -// line:column valid position without file name -// file invalid position with file name -// - invalid position without file name -// -func (pos Position) String() string { - s := pos.Filename - if pos.IsValid() { - if s != "" { - s += ":" - } - s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) - } - if s == "" { - s = "-" - } - return s -} - -// Pos is a compact encoding of a source position within a file set. -// It can be converted into a Position for a more convenient, but much -// larger, representation. -// -// The Pos value for a given file is a number in the range [base, base+size], -// where base and size are specified when adding the file to the file set via -// AddFile. -// -// To create the Pos value for a specific source offset, first add -// the respective file to the current file set (via FileSet.AddFile) -// and then call File.Pos(offset) for that file. Given a Pos value p -// for a specific file set fset, the corresponding Position value is -// obtained by calling fset.Position(p). -// -// Pos values can be compared directly with the usual comparison operators: -// If two Pos values p and q are in the same file, comparing p and q is -// equivalent to comparing the respective source file offsets. If p and q -// are in different files, p < q is true if the file implied by p was added -// to the respective file set before the file implied by q. -// -type Pos int - -// The zero value for Pos is NoPos; there is no file and line information -// associated with it, and NoPos().IsValid() is false. NoPos is always -// smaller than any other Pos value. The corresponding Position value -// for NoPos is the zero value for Position. -// -const NoPos Pos = 0 - -// IsValid returns true if the position is valid. -func (p Pos) IsValid() bool { - return p != NoPos -} - -// ----------------------------------------------------------------------------- -// File - -// A File is a handle for a file belonging to a FileSet. -// A File has a name, size, and line offset table. -// -type File struct { - set *FileSet - name string // file name as provided to AddFile - base int // Pos value range for this file is [base...base+size] - size int // file size as provided to AddFile - - // lines and infos are protected by set.mutex - lines []int - infos []lineInfo -} - -// Name returns the file name of file f as registered with AddFile. -func (f *File) Name() string { - return f.name -} - -// Base returns the base offset of file f as registered with AddFile. -func (f *File) Base() int { - return f.base -} - -// Size returns the size of file f as registered with AddFile. -func (f *File) Size() int { - return f.size -} - -// LineCount returns the number of lines in file f. 
-func (f *File) LineCount() int { - f.set.mutex.RLock() - n := len(f.lines) - f.set.mutex.RUnlock() - return n -} - -// AddLine adds the line offset for a new line. -// The line offset must be larger than the offset for the previous line -// and smaller than the file size; otherwise the line offset is ignored. -// -func (f *File) AddLine(offset int) { - f.set.mutex.Lock() - if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { - f.lines = append(f.lines, offset) - } - f.set.mutex.Unlock() -} - -// SetLines sets the line offsets for a file and returns true if successful. -// The line offsets are the offsets of the first character of each line; -// for instance for the content "ab\nc\n" the line offsets are {0, 3}. -// An empty file has an empty line offset table. -// Each line offset must be larger than the offset for the previous line -// and smaller than the file size; otherwise SetLines fails and returns -// false. -// -func (f *File) SetLines(lines []int) bool { - // verify validity of lines table - size := f.size - for i, offset := range lines { - if i > 0 && offset <= lines[i-1] || size <= offset { - return false - } - } - - // set lines table - f.set.mutex.Lock() - f.lines = lines - f.set.mutex.Unlock() - return true -} - -// SetLinesForContent sets the line offsets for the given file content. -func (f *File) SetLinesForContent(content []byte) { - var lines []int - line := 0 - for offset, b := range content { - if line >= 0 { - lines = append(lines, line) - } - line = -1 - if b == '\n' { - line = offset + 1 - } - } - - // set lines table - f.set.mutex.Lock() - f.lines = lines - f.set.mutex.Unlock() -} - -// A lineInfo object describes alternative file and line number -// information (such as provided via a //line comment in a .go -// file) for a given file offset. -type lineInfo struct { - // fields are exported to make them accessible to gob - Offset int - Filename string - Line int -} - -// AddLineInfo adds alternative file and line number information for -// a given file offset. The offset must be larger than the offset for -// the previously added alternative line info and smaller than the -// file size; otherwise the information is ignored. -// -// AddLineInfo is typically used to register alternative position -// information for //line filename:line comments in source files. -// -func (f *File) AddLineInfo(offset int, filename string, line int) { - f.set.mutex.Lock() - if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { - f.infos = append(f.infos, lineInfo{offset, filename, line}) - } - f.set.mutex.Unlock() -} - -// Pos returns the Pos value for the given file offset; -// the offset must be <= f.Size(). -// f.Pos(f.Offset(p)) == p. -// -func (f *File) Pos(offset int) Pos { - if offset > f.size { - panic("illegal file offset") - } - return Pos(f.base + offset) -} - -// Offset returns the offset for the given file position p; -// p must be a valid Pos value in that file. -// f.Offset(f.Pos(offset)) == offset. -// -func (f *File) Offset(p Pos) int { - if int(p) < f.base || int(p) > f.base+f.size { - panic("illegal Pos value") - } - return int(p) - f.base -} - -// Line returns the line number for the given file position p; -// p must be a Pos value in that file or NoPos. 
-// -func (f *File) Line(p Pos) int { - // TODO(gri) this can be implemented much more efficiently - return f.Position(p).Line -} - -func searchLineInfos(a []lineInfo, x int) int { - return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 -} - -// info returns the file name, line, and column number for a file offset. -func (f *File) info(offset int) (filename string, line, column int) { - filename = f.name - if i := searchInts(f.lines, offset); i >= 0 { - line, column = i+1, offset-f.lines[i]+1 - } - if len(f.infos) > 0 { - // almost no files have extra line infos - if i := searchLineInfos(f.infos, offset); i >= 0 { - alt := &f.infos[i] - filename = alt.Filename - if i := searchInts(f.lines, alt.Offset); i >= 0 { - line += alt.Line - i - 1 - } - } - } - return -} - -func (f *File) position(p Pos) (pos Position) { - offset := int(p) - f.base - pos.Offset = offset - pos.Filename, pos.Line, pos.Column = f.info(offset) - return -} - -// Position returns the Position value for the given file position p; -// p must be a Pos value in that file or NoPos. -// -func (f *File) Position(p Pos) (pos Position) { - if p != NoPos { - if int(p) < f.base || int(p) > f.base+f.size { - panic("illegal Pos value") - } - pos = f.position(p) - } - return -} - -// ----------------------------------------------------------------------------- -// FileSet - -// A FileSet represents a set of source files. -// Methods of file sets are synchronized; multiple goroutines -// may invoke them concurrently. -// -type FileSet struct { - mutex sync.RWMutex // protects the file set - base int // base offset for the next file - files []*File // list of files in the order added to the set - last *File // cache of last file looked up -} - -// NewFileSet creates a new file set. -func NewFileSet() *FileSet { - s := new(FileSet) - s.base = 1 // 0 == NoPos - return s -} - -// Base returns the minimum base offset that must be provided to -// AddFile when adding the next file. -// -func (s *FileSet) Base() int { - s.mutex.RLock() - b := s.base - s.mutex.RUnlock() - return b - -} - -// AddFile adds a new file with a given filename, base offset, and file size -// to the file set s and returns the file. Multiple files may have the same -// name. The base offset must not be smaller than the FileSet's Base(), and -// size must not be negative. -// -// Adding the file will set the file set's Base() value to base + size + 1 -// as the minimum base value for the next file. The following relationship -// exists between a Pos value p for a given file offset offs: -// -// int(p) = base + offs -// -// with offs in the range [0, size] and thus p in the range [base, base+size]. -// For convenience, File.Pos may be used to create file-specific position -// values from a file offset. -// -func (s *FileSet) AddFile(filename string, base, size int) *File { - s.mutex.Lock() - defer s.mutex.Unlock() - if base < s.base || size < 0 { - panic("illegal base or size") - } - // base >= s.base && size >= 0 - f := &File{s, filename, base, size, []int{0}, nil} - base += size + 1 // +1 because EOF also has a position - if base < 0 { - panic("token.Pos offset overflow (> 2G of source code in file set)") - } - // add the file to the file set - s.base = base - s.files = append(s.files, f) - s.last = f - return f -} - -// Iterate calls f for the files in the file set in the order they were added -// until f returns false. 
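The position machinery being removed mirrors go/token. A hedged sketch of the offset-to-Position round trip it documents (file name and content invented):

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	src := []byte("[core]\n\tname = demo\n")

	fset := token.NewFileSet()
	// Base() supplies the minimum base offset for the next file.
	file := fset.AddFile("demo.gcfg", fset.Base(), len(src))
	file.SetLinesForContent(src) // record line-start offsets

	p := file.Pos(8)              // compact Pos for byte offset 8
	fmt.Println(fset.Position(p)) // expands to "demo.gcfg:2:2"
}
```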
-// -func (s *FileSet) Iterate(f func(*File) bool) { - for i := 0; ; i++ { - var file *File - s.mutex.RLock() - if i < len(s.files) { - file = s.files[i] - } - s.mutex.RUnlock() - if file == nil || !f(file) { - break - } - } -} - -func searchFiles(a []*File, x int) int { - return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 -} - -func (s *FileSet) file(p Pos) *File { - // common case: p is in last file - if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { - return f - } - // p is not in last file - search all files - if i := searchFiles(s.files, int(p)); i >= 0 { - f := s.files[i] - // f.base <= int(p) by definition of searchFiles - if int(p) <= f.base+f.size { - s.last = f - return f - } - } - return nil -} - -// File returns the file that contains the position p. -// If no such file is found (for instance for p == NoPos), -// the result is nil. -// -func (s *FileSet) File(p Pos) (f *File) { - if p != NoPos { - s.mutex.RLock() - f = s.file(p) - s.mutex.RUnlock() - } - return -} - -// Position converts a Pos in the fileset into a general Position. -func (s *FileSet) Position(p Pos) (pos Position) { - if p != NoPos { - s.mutex.RLock() - if f := s.file(p); f != nil { - pos = f.position(p) - } - s.mutex.RUnlock() - } - return -} - -// ----------------------------------------------------------------------------- -// Helper functions - -func searchInts(a []int, x int) int { - // This function body is a manually inlined version of: - // - // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 - // - // With better compiler optimizations, this may not be needed in the - // future, but at the moment this change improves the go/printer - // benchmark performance by ~30%. This has a direct impact on the - // speed of gofmt and thus seems worthwhile (2011-04-29). - // TODO(gri): Remove this when compilers have caught up. - i, j := 0, len(a) - for i < j { - h := i + (j-i)/2 // avoid overflow when computing h - // i ≤ h < j - if a[h] <= x { - i = h + 1 - } else { - j = h - } - } - return i - 1 -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/token/serialize.go b/ui/wasm/vendor/github.com/go-git/gcfg/token/serialize.go deleted file mode 100644 index 4adc8f9e334..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/token/serialize.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package token - -type serializedFile struct { - // fields correspond 1:1 to fields with same (lower-case) name in File - Name string - Base int - Size int - Lines []int - Infos []lineInfo -} - -type serializedFileSet struct { - Base int - Files []serializedFile -} - -// Read calls decode to deserialize a file set into s; s must not be nil. -func (s *FileSet) Read(decode func(interface{}) error) error { - var ss serializedFileSet - if err := decode(&ss); err != nil { - return err - } - - s.mutex.Lock() - s.base = ss.Base - files := make([]*File, len(ss.Files)) - for i := 0; i < len(ss.Files); i++ { - f := &ss.Files[i] - files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} - } - s.files = files - s.last = nil - s.mutex.Unlock() - - return nil -} - -// Write calls encode to serialize the file set s. 
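serialize.go (its Write half continues below) exposes the file set through encode/decode callbacks rather than a fixed codec. A hedged round-trip sketch using encoding/gob as one possible codec:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"

	"github.com/go-git/gcfg/token"
)

func main() {
	orig := token.NewFileSet()
	orig.AddFile("a.gcfg", orig.Base(), 10)

	// Write and Read accept any func(interface{}) error pair;
	// gob's Encode/Decode method values match that shape.
	var buf bytes.Buffer
	if err := orig.Write(gob.NewEncoder(&buf).Encode); err != nil {
		panic(err)
	}

	restored := token.NewFileSet()
	if err := restored.Read(gob.NewDecoder(&buf).Decode); err != nil {
		panic(err)
	}
	fmt.Println(restored.Base()) // 12: base 1 + size 10 + 1 for EOF
}
```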
-func (s *FileSet) Write(encode func(interface{}) error) error { - var ss serializedFileSet - - s.mutex.Lock() - ss.Base = s.base - files := make([]serializedFile, len(s.files)) - for i, f := range s.files { - files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} - } - ss.Files = files - s.mutex.Unlock() - - return encode(ss) -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/token/token.go b/ui/wasm/vendor/github.com/go-git/gcfg/token/token.go deleted file mode 100644 index b3c7c83fa9e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/token/token.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package token defines constants representing the lexical tokens of the gcfg -// configuration syntax and basic operations on tokens (printing, predicates). -// -// Note that the API for the token package may change to accommodate new -// features or implementation changes in gcfg. -// -package token - -import "strconv" - -// Token is the set of lexical tokens of the gcfg configuration syntax. -type Token int - -// The list of tokens. -const ( - // Special tokens - ILLEGAL Token = iota - EOF - COMMENT - - literal_beg - // Identifiers and basic type literals - // (these tokens stand for classes of literals) - IDENT // section-name, variable-name - STRING // "subsection-name", variable value - literal_end - - operator_beg - // Operators and delimiters - ASSIGN // = - LBRACK // [ - RBRACK // ] - EOL // \n - operator_end -) - -var tokens = [...]string{ - ILLEGAL: "ILLEGAL", - - EOF: "EOF", - COMMENT: "COMMENT", - - IDENT: "IDENT", - STRING: "STRING", - - ASSIGN: "=", - LBRACK: "[", - RBRACK: "]", - EOL: "\n", -} - -// String returns the string corresponding to the token tok. -// For operators and delimiters, the string is the actual token character -// sequence (e.g., for the token ASSIGN, the string is "="). For all other -// tokens the string corresponds to the token constant name (e.g. for the -// token IDENT, the string is "IDENT"). -// -func (tok Token) String() string { - s := "" - if 0 <= tok && tok < Token(len(tokens)) { - s = tokens[tok] - } - if s == "" { - s = "token(" + strconv.Itoa(int(tok)) + ")" - } - return s -} - -// Predicates - -// IsLiteral returns true for tokens corresponding to identifiers -// and basic type literals; it returns false otherwise. -// -func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } - -// IsOperator returns true for tokens corresponding to operators and -// delimiters; it returns false otherwise. -// -func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/types/bool.go b/ui/wasm/vendor/github.com/go-git/gcfg/types/bool.go deleted file mode 100644 index 8dcae0d8cfd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/types/bool.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// BoolValues defines the name and value mappings for ParseBool. -var BoolValues = map[string]interface{}{ - "true": true, "yes": true, "on": true, "1": true, - "false": false, "no": false, "off": false, "0": false, -} - -var boolParser = func() *EnumParser { - ep := &EnumParser{} - ep.AddVals(BoolValues) - return ep -}() - -// ParseBool parses bool values according to the definitions in BoolValues. -// Parsing is case-insensitive. 
-func ParseBool(s string) (bool, error) { - v, err := boolParser.Parse(s) - if err != nil { - return false, err - } - return v.(bool), nil -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/types/doc.go b/ui/wasm/vendor/github.com/go-git/gcfg/types/doc.go deleted file mode 100644 index 9f9c345f6ea..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/types/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package types defines helpers for type conversions. -// -// The API for this package is not finalized yet. -package types diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/types/enum.go b/ui/wasm/vendor/github.com/go-git/gcfg/types/enum.go deleted file mode 100644 index 1a0c7ef453d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/types/enum.go +++ /dev/null @@ -1,44 +0,0 @@ -package types - -import ( - "fmt" - "reflect" - "strings" -) - -// EnumParser parses "enum" values; i.e. a predefined set of strings to -// predefined values. -type EnumParser struct { - Type string // type name; if not set, use type of first value added - CaseMatch bool // if true, matching of strings is case-sensitive - // PrefixMatch bool - vals map[string]interface{} -} - -// AddVals adds strings and values to an EnumParser. -func (ep *EnumParser) AddVals(vals map[string]interface{}) { - if ep.vals == nil { - ep.vals = make(map[string]interface{}) - } - for k, v := range vals { - if ep.Type == "" { - ep.Type = reflect.TypeOf(v).Name() - } - if !ep.CaseMatch { - k = strings.ToLower(k) - } - ep.vals[k] = v - } -} - -// Parse parses the string and returns the value or an error. -func (ep EnumParser) Parse(s string) (interface{}, error) { - if !ep.CaseMatch { - s = strings.ToLower(s) - } - v, ok := ep.vals[s] - if !ok { - return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) - } - return v, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/types/int.go b/ui/wasm/vendor/github.com/go-git/gcfg/types/int.go deleted file mode 100644 index af7e75c1250..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/types/int.go +++ /dev/null @@ -1,86 +0,0 @@ -package types - -import ( - "fmt" - "strings" -) - -// An IntMode is a mode for parsing integer values, representing a set of -// accepted bases. -type IntMode uint8 - -// IntMode values for ParseInt; can be combined using binary or. -const ( - Dec IntMode = 1 << iota - Hex - Oct -) - -// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. -func (m IntMode) String() string { - var modes []string - if m&Dec != 0 { - modes = append(modes, "Dec") - } - if m&Hex != 0 { - modes = append(modes, "Hex") - } - if m&Oct != 0 { - modes = append(modes, "Oct") - } - return "IntMode(" + strings.Join(modes, "|") + ")" -} - -var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") - -func prefix0(val string) bool { - return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") -} - -func prefix0x(val string) bool { - return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") -} - -// ParseInt parses val using mode into intptr, which must be a pointer to an -// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases -// when mode permits ambiguity of base; otherwise the prefix can be omitted. 
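The types helpers deleted in this stretch implement the enum, bool, and integer parsing the package documentation promised. A hedged sketch of their use — the color names and values are invented:

```go
package main

import (
	"fmt"

	"github.com/go-git/gcfg/types"
)

func main() {
	// EnumParser maps a fixed set of strings to values
	// (case-insensitive unless CaseMatch is set).
	ep := &types.EnumParser{Type: "color"}
	ep.AddVals(map[string]interface{}{"red": 0xff0000, "green": 0x00ff00})

	v, err := ep.Parse("RED")
	fmt.Println(v, err) // 16711680 <nil>

	ok, _ := types.ParseBool("YES") // case-insensitive: true
	fmt.Println(ok)

	// ParseInt: with Dec|Hex, a "0x" prefix selects hexadecimal;
	// otherwise the value is read as decimal.
	var n int
	if err := types.ParseInt(&n, "0x1f", types.Dec|types.Hex); err == nil {
		fmt.Println(n) // 31
	}
}
```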
-func ParseInt(intptr interface{}, val string, mode IntMode) error { - val = strings.TrimSpace(val) - verb := byte(0) - switch mode { - case Dec: - verb = 'd' - case Dec + Hex: - if prefix0x(val) { - verb = 'v' - } else { - verb = 'd' - } - case Dec + Oct: - if prefix0(val) && !prefix0x(val) { - verb = 'v' - } else { - verb = 'd' - } - case Dec + Hex + Oct: - verb = 'v' - case Hex: - if prefix0x(val) { - verb = 'v' - } else { - verb = 'x' - } - case Oct: - verb = 'o' - case Hex + Oct: - if prefix0(val) { - verb = 'v' - } else { - return errIntAmbig - } - } - if verb == 0 { - panic("unsupported mode") - } - return ScanFully(intptr, val, verb) -} diff --git a/ui/wasm/vendor/github.com/go-git/gcfg/types/scan.go b/ui/wasm/vendor/github.com/go-git/gcfg/types/scan.go deleted file mode 100644 index db2f6ed3caf..00000000000 --- a/ui/wasm/vendor/github.com/go-git/gcfg/types/scan.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -import ( - "fmt" - "io" - "reflect" -) - -// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. -func ScanFully(ptr interface{}, val string, verb byte) error { - t := reflect.ValueOf(ptr).Elem().Type() - // attempt to read extra bytes to make sure the value is consumed - var b []byte - n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) - switch { - case n < 1 || n == 1 && err != io.EOF: - return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) - case n > 1: - return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) - } - // n == 1 && err == io.EOF - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/.gitignore b/ui/wasm/vendor/github.com/go-git/go-billy/v5/.gitignore deleted file mode 100644 index 7aeb46699cd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/coverage.txt -/vendor -Gopkg.lock -Gopkg.toml diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/LICENSE b/ui/wasm/vendor/github.com/go-git/go-billy/v5/LICENSE deleted file mode 100644 index 9d60756894a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2017 Sourced Technologies S.L. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/README.md b/ui/wasm/vendor/github.com/go-git/go-billy/v5/README.md deleted file mode 100644 index da5c074782c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy/v5) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest) - -The missing interface filesystem abstraction for Go. -Billy implements an interface based on the `os` standard library, allowing to develop applications without dependency on the underlying storage. Makes it virtually free to implement mocks and testing over filesystem operations. - -Billy was born as part of [go-git/go-git](https://github.com/go-git/go-git) project. 
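The interface-first design described above is what makes swapping backends cheap: code written against `billy.Filesystem` runs unchanged on the in-memory implementation. A minimal sketch of that pattern, using only APIs present in this vendored copy (the `save` helper is illustrative, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

// save is written against the interface, so production code can pass
// osfs.New("/some/dir") while tests pass memfs.New().
func save(fs billy.Filesystem, name string, data []byte) error {
	return util.WriteFile(fs, name, data, 0644)
}

func main() {
	fs := memfs.New() // purely in-memory, no disk access
	if err := save(fs, "hello.txt", []byte("hi\n")); err != nil {
		panic(err)
	}
	b, err := util.ReadFile(fs, "hello.txt")
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b)) // hi
}
```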
- -## Installation - -```go -import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) -import "github.com/go-git/go-billy" // with go modules disabled -``` - -## Usage - -Billy exposes filesystems using the -[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem). -Each filesystem implementation gives you a `New` method, whose arguments depend on -the implementation itself, that returns a new `Filesystem`. - -The following example caches in memory all readable files in a directory from any -billy's filesystem implementation. - -```go -func LoadToMemory(origin billy.Filesystem, path string) (*memory.Memory, error) { - memory := memory.New() - - files, err := origin.ReadDir("/") - if err != nil { - return nil, err - } - - for _, file := range files { - if file.IsDir() { - continue - } - - src, err := origin.Open(file.Name()) - if err != nil { - return nil, err - } - - dst, err := memory.Create(file.Name()) - if err != nil { - return nil, err - } - - if _, err = io.Copy(dst, src); err != nil { - return nil, err - } - - if err := dst.Close(); err != nil { - return nil, err - } - - if err := src.Close(); err != nil { - return nil, err - } - } - - return memory, nil -} -``` - -## Why billy? - -The library billy deals with storage systems and Billy is the name of a well-known, IKEA -bookcase. That's it. - -## License - -Apache License Version 2.0, see [LICENSE](LICENSE) diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/fs.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/fs.go deleted file mode 100644 index a9efccdeb2f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/fs.go +++ /dev/null @@ -1,202 +0,0 @@ -package billy - -import ( - "errors" - "io" - "os" - "time" -) - -var ( - ErrReadOnly = errors.New("read-only filesystem") - ErrNotSupported = errors.New("feature not supported") - ErrCrossedBoundary = errors.New("chroot boundary crossed") -) - -// Capability holds the supported features of a billy filesystem. This does -// not mean that the capability has to be supported by the underlying storage. -// For example, a billy filesystem may support WriteCapability but the -// storage be mounted in read only mode. -type Capability uint64 - -const ( - // WriteCapability means that the fs is writable. - WriteCapability Capability = 1 << iota - // ReadCapability means that the fs is readable. - ReadCapability - // ReadAndWriteCapability is the ability to open a file in read and write mode. - ReadAndWriteCapability - // SeekCapability means it is able to move position inside the file. - SeekCapability - // TruncateCapability means that a file can be truncated. - TruncateCapability - // LockCapability is the ability to lock a file. - LockCapability - - // DefaultCapabilities lists all capable features supported by filesystems - // without Capability interface. This list should not be changed until a - // major version is released. - DefaultCapabilities Capability = WriteCapability | ReadCapability | - ReadAndWriteCapability | SeekCapability | TruncateCapability | - LockCapability - - // AllCapabilities lists all capable features. - AllCapabilities Capability = WriteCapability | ReadCapability | - ReadAndWriteCapability | SeekCapability | TruncateCapability | - LockCapability -) - -// Filesystem abstract the operations in a storage-agnostic interface. -// Each method implementation mimics the behavior of the equivalent functions -// at the os package from the standard library. 
-type Filesystem interface { - Basic - TempFile - Dir - Symlink - Chroot -} - -// Basic abstract the basic operations in a storage-agnostic interface as -// an extension to the Basic interface. -type Basic interface { - // Create creates the named file with mode 0666 (before umask), truncating - // it if it already exists. If successful, methods on the returned File can - // be used for I/O; the associated file descriptor has mode O_RDWR. - Create(filename string) (File, error) - // Open opens the named file for reading. If successful, methods on the - // returned file can be used for reading; the associated file descriptor has - // mode O_RDONLY. - Open(filename string) (File, error) - // OpenFile is the generalized open call; most users will use Open or Create - // instead. It opens the named file with specified flag (O_RDONLY etc.) and - // perm, (0666 etc.) if applicable. If successful, methods on the returned - // File can be used for I/O. - OpenFile(filename string, flag int, perm os.FileMode) (File, error) - // Stat returns a FileInfo describing the named file. - Stat(filename string) (os.FileInfo, error) - // Rename renames (moves) oldpath to newpath. If newpath already exists and - // is not a directory, Rename replaces it. OS-specific restrictions may - // apply when oldpath and newpath are in different directories. - Rename(oldpath, newpath string) error - // Remove removes the named file or directory. - Remove(filename string) error - // Join joins any number of path elements into a single path, adding a - // Separator if necessary. Join calls filepath.Clean on the result; in - // particular, all empty strings are ignored. On Windows, the result is a - // UNC path if and only if the first path element is a UNC path. - Join(elem ...string) string -} - -type TempFile interface { - // TempFile creates a new temporary file in the directory dir with a name - // beginning with prefix, opens the file for reading and writing, and - // returns the resulting *os.File. If dir is the empty string, TempFile - // uses the default directory for temporary files (see os.TempDir). - // Multiple programs calling TempFile simultaneously will not choose the - // same file. The caller can use f.Name() to find the pathname of the file. - // It is the caller's responsibility to remove the file when no longer - // needed. - TempFile(dir, prefix string) (File, error) -} - -// Dir abstract the dir related operations in a storage-agnostic interface as -// an extension to the Basic interface. -type Dir interface { - // ReadDir reads the directory named by dirname and returns a list of - // directory entries sorted by filename. - ReadDir(path string) ([]os.FileInfo, error) - // MkdirAll creates a directory named path, along with any necessary - // parents, and returns nil, or else returns an error. The permission bits - // perm are used for all directories that MkdirAll creates. If path is/ - // already a directory, MkdirAll does nothing and returns nil. - MkdirAll(filename string, perm os.FileMode) error -} - -// Symlink abstract the symlink related operations in a storage-agnostic -// interface as an extension to the Basic interface. -type Symlink interface { - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. - Lstat(filename string) (os.FileInfo, error) - // Symlink creates a symbolic-link from link to target. 
target may be an - // absolute or relative path, and need not refer to an existing node. - // Parent directories of link are created as necessary. - Symlink(target, link string) error - // Readlink returns the target path of link. - Readlink(link string) (string, error) -} - -// Change abstract the FileInfo change related operations in a storage-agnostic -// interface as an extension to the Basic interface -type Change interface { - // Chmod changes the mode of the named file to mode. If the file is a - // symbolic link, it changes the mode of the link's target. - Chmod(name string, mode os.FileMode) error - // Lchown changes the numeric uid and gid of the named file. If the file is - // a symbolic link, it changes the uid and gid of the link itself. - Lchown(name string, uid, gid int) error - // Chown changes the numeric uid and gid of the named file. If the file is a - // symbolic link, it changes the uid and gid of the link's target. - Chown(name string, uid, gid int) error - // Chtimes changes the access and modification times of the named file, - // similar to the Unix utime() or utimes() functions. - // - // The underlying filesystem may truncate or round the values to a less - // precise time unit. - Chtimes(name string, atime time.Time, mtime time.Time) error -} - -// Chroot abstract the chroot related operations in a storage-agnostic interface -// as an extension to the Basic interface. -type Chroot interface { - // Chroot returns a new filesystem from the same type where the new root is - // the given path. Files outside of the designated directory tree cannot be - // accessed. - Chroot(path string) (Filesystem, error) - // Root returns the root path of the filesystem. - Root() string -} - -// File represent a file, being a subset of the os.File -type File interface { - // Name returns the name of the file as presented to Open. - Name() string - io.Writer - io.Reader - io.ReaderAt - io.Seeker - io.Closer - // Lock locks the file like e.g. flock. It protects against access from - // other processes. - Lock() error - // Unlock unlocks the file. - Unlock() error - // Truncate the file. - Truncate(size int64) error -} - -// Capable interface can return the available features of a filesystem. -type Capable interface { - // Capabilities returns the capabilities of a filesystem in bit flags. - Capabilities() Capability -} - -// Capabilities returns the features supported by a filesystem. If the FS -// does not implement Capable interface it returns all features. -func Capabilities(fs Basic) Capability { - capable, ok := fs.(Capable) - if !ok { - return DefaultCapabilities - } - - return capable.Capabilities() -} - -// CapabilityCheck tests the filesystem for the provided capabilities and -// returns true in case it supports all of them. -func CapabilityCheck(fs Basic, capabilities Capability) bool { - fsCaps := Capabilities(fs) - return fsCaps&capabilities == capabilities -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go deleted file mode 100644 index 8b44e784bd7..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go +++ /dev/null @@ -1,242 +0,0 @@ -package chroot - -import ( - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/polyfill" -) - -// ChrootHelper is a helper to implement billy.Chroot. 
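A hedged sketch of what the chroot helper defined below provides to callers, assuming only the `Chroot` and `ErrCrossedBoundary` behavior documented in this file:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	root := memfs.New()
	_ = util.WriteFile(root, "/app/config.yaml", []byte("key: value\n"), 0644)

	// All paths on `scoped` are resolved relative to /app.
	scoped, err := root.Chroot("/app")
	if err != nil {
		panic(err)
	}
	b, _ := util.ReadFile(scoped, "config.yaml")
	fmt.Print(string(b))

	// Escaping the new root is rejected rather than resolved.
	_, err = scoped.Open("../etc/passwd")
	fmt.Println(errors.Is(err, billy.ErrCrossedBoundary)) // true
}
```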
-type ChrootHelper struct { - underlying billy.Filesystem - base string -} - -// New creates a new filesystem wrapping up the given 'fs'. -// The created filesystem has its base in the given ChrootHelperectory of the -// underlying filesystem. -func New(fs billy.Basic, base string) billy.Filesystem { - return &ChrootHelper{ - underlying: polyfill.New(fs), - base: base, - } -} - -func (fs *ChrootHelper) underlyingPath(filename string) (string, error) { - if isCrossBoundaries(filename) { - return "", billy.ErrCrossedBoundary - } - - return fs.Join(fs.Root(), filename), nil -} - -func isCrossBoundaries(path string) bool { - path = filepath.ToSlash(path) - path = filepath.Clean(path) - - return strings.HasPrefix(path, ".."+string(filepath.Separator)) -} - -func (fs *ChrootHelper) Create(filename string) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.Create(fullpath) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) Open(filename string) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.Open(fullpath) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - f, err := fs.underlying.OpenFile(fullpath, flag, mode) - if err != nil { - return nil, err - } - - return newFile(fs, f, filename), nil -} - -func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - return fs.underlying.Stat(fullpath) -} - -func (fs *ChrootHelper) Rename(from, to string) error { - var err error - from, err = fs.underlyingPath(from) - if err != nil { - return err - } - - to, err = fs.underlyingPath(to) - if err != nil { - return err - } - - return fs.underlying.Rename(from, to) -} - -func (fs *ChrootHelper) Remove(path string) error { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return err - } - - return fs.underlying.Remove(fullpath) -} - -func (fs *ChrootHelper) Join(elem ...string) string { - return fs.underlying.Join(elem...) 
-} - -func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) { - fullpath, err := fs.underlyingPath(dir) - if err != nil { - return nil, err - } - - f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix) - if err != nil { - return nil, err - } - - return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil -} - -func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return nil, err - } - - return fs.underlying.(billy.Dir).ReadDir(fullpath) -} - -func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return err - } - - return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm) -} - -func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) { - fullpath, err := fs.underlyingPath(filename) - if err != nil { - return nil, err - } - - return fs.underlying.(billy.Symlink).Lstat(fullpath) -} - -func (fs *ChrootHelper) Symlink(target, link string) error { - target = filepath.FromSlash(target) - - // only rewrite target if it's already absolute - if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) { - target = fs.Join(fs.Root(), target) - target = filepath.Clean(filepath.FromSlash(target)) - } - - link, err := fs.underlyingPath(link) - if err != nil { - return err - } - - return fs.underlying.(billy.Symlink).Symlink(target, link) -} - -func (fs *ChrootHelper) Readlink(link string) (string, error) { - fullpath, err := fs.underlyingPath(link) - if err != nil { - return "", err - } - - target, err := fs.underlying.(billy.Symlink).Readlink(fullpath) - if err != nil { - return "", err - } - - if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) { - return target, nil - } - - target, err = filepath.Rel(fs.base, target) - if err != nil { - return "", err - } - - return string(os.PathSeparator) + target, nil -} - -func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) { - fullpath, err := fs.underlyingPath(path) - if err != nil { - return nil, err - } - - return New(fs.underlying, fullpath), nil -} - -func (fs *ChrootHelper) Root() string { - return fs.base -} - -func (fs *ChrootHelper) Underlying() billy.Basic { - return fs.underlying -} - -// Capabilities implements the Capable interface. -func (fs *ChrootHelper) Capabilities() billy.Capability { - return billy.Capabilities(fs.underlying) -} - -type file struct { - billy.File - name string -} - -func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File { - filename = fs.Join(fs.Root(), filename) - filename, _ = filepath.Rel(fs.Root(), filename) - - return &file{ - File: f, - name: filename, - } -} - -func (f *file) Name() string { - return f.name -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go deleted file mode 100644 index 1efce0e7b8f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go +++ /dev/null @@ -1,105 +0,0 @@ -package polyfill - -import ( - "os" - "path/filepath" - - "github.com/go-git/go-billy/v5" -) - -// Polyfill is a helper that implements all missing method from billy.Filesystem. 
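As a usage sketch for the wrapper defined below (the `basicOnly` type is hypothetical, standing in for a backend that implements nothing beyond `billy.Basic`):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/helper/polyfill"
	"github.com/go-git/go-billy/v5/memfs"
)

// basicOnly exposes only the billy.Basic method set, standing in for a
// storage backend that implements no optional extensions.
type basicOnly struct{ billy.Basic }

func main() {
	fs := polyfill.New(basicOnly{memfs.New()})

	// Basic operations are delegated to the wrapped backend...
	if _, err := fs.Create("a.txt"); err != nil {
		panic(err)
	}

	// ...while missing extensions fail with a sentinel error instead of
	// an interface type-assertion panic.
	_, err := fs.ReadDir("/")
	fmt.Println(errors.Is(err, billy.ErrNotSupported)) // true
}
```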
-type Polyfill struct { - billy.Basic - c capabilities -} - -type capabilities struct{ tempfile, dir, symlink, chroot bool } - -// New creates a new filesystem wrapping up 'fs' the intercepts all the calls -// made and errors if fs doesn't implement any of the billy interfaces. -func New(fs billy.Basic) billy.Filesystem { - if original, ok := fs.(billy.Filesystem); ok { - return original - } - - h := &Polyfill{Basic: fs} - - _, h.c.tempfile = h.Basic.(billy.TempFile) - _, h.c.dir = h.Basic.(billy.Dir) - _, h.c.symlink = h.Basic.(billy.Symlink) - _, h.c.chroot = h.Basic.(billy.Chroot) - return h -} - -func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) { - if !h.c.tempfile { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.TempFile).TempFile(dir, prefix) -} - -func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) { - if !h.c.dir { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Dir).ReadDir(path) -} - -func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error { - if !h.c.dir { - return billy.ErrNotSupported - } - - return h.Basic.(billy.Dir).MkdirAll(filename, perm) -} - -func (h *Polyfill) Symlink(target, link string) error { - if !h.c.symlink { - return billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Symlink(target, link) -} - -func (h *Polyfill) Readlink(link string) (string, error) { - if !h.c.symlink { - return "", billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Readlink(link) -} - -func (h *Polyfill) Lstat(path string) (os.FileInfo, error) { - if !h.c.symlink { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Symlink).Lstat(path) -} - -func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) { - if !h.c.chroot { - return nil, billy.ErrNotSupported - } - - return h.Basic.(billy.Chroot).Chroot(path) -} - -func (h *Polyfill) Root() string { - if !h.c.chroot { - return string(filepath.Separator) - } - - return h.Basic.(billy.Chroot).Root() -} - -func (h *Polyfill) Underlying() billy.Basic { - return h.Basic -} - -// Capabilities implements the Capable interface. -func (h *Polyfill) Capabilities() billy.Capability { - return billy.Capabilities(h.Basic) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/memory.go deleted file mode 100644 index f217693e6f0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/memory.go +++ /dev/null @@ -1,410 +0,0 @@ -// Package memfs provides a billy filesystem base on memory. -package memfs // import "github.com/go-git/go-billy/v5/memfs" - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/chroot" - "github.com/go-git/go-billy/v5/util" -) - -const separator = filepath.Separator - -// Memory a very convenient filesystem based on memory files -type Memory struct { - s *storage - - tempCount int -} - -//New returns a new Memory filesystem. 
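Before the constructor, a short sketch of how the in-memory filesystem handles the standard `os` open flags, per the flag helpers at the bottom of this file:

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	// Create is OpenFile(name, O_RDWR|O_CREATE|O_TRUNC, 0666).
	f, _ := fs.Create("log.txt")
	f.Write([]byte("first\n"))
	f.Close()

	// O_EXCL refuses to open a file that already exists.
	_, err := fs.OpenFile("log.txt", os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
	fmt.Println(os.IsExist(err)) // true

	// O_APPEND starts the write position at the end of the content.
	f, _ = fs.OpenFile("log.txt", os.O_WRONLY|os.O_APPEND, 0666)
	f.Write([]byte("second\n"))
	f.Close()
}
```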
-func New() billy.Filesystem { - fs := &Memory{s: newStorage()} - return chroot.New(fs, string(separator)) -} - -func (fs *Memory) Create(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) -} - -func (fs *Memory) Open(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDONLY, 0) -} - -func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { - f, has := fs.s.Get(filename) - if !has { - if !isCreate(flag) { - return nil, os.ErrNotExist - } - - var err error - f, err = fs.s.New(filename, perm, flag) - if err != nil { - return nil, err - } - } else { - if isExclusive(flag) { - return nil, os.ErrExist - } - - if target, isLink := fs.resolveLink(filename, f); isLink { - return fs.OpenFile(target, flag, perm) - } - } - - if f.mode.IsDir() { - return nil, fmt.Errorf("cannot open directory: %s", filename) - } - - return f.Duplicate(filename, perm, flag), nil -} - -var errNotLink = errors.New("not a link") - -func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) { - if !isSymlink(f.mode) { - return fullpath, false - } - - target = string(f.content.bytes) - if !isAbs(target) { - target = fs.Join(filepath.Dir(fullpath), target) - } - - return target, true -} - -// On Windows OS, IsAbs validates if a path is valid based on if stars with a -// unit (eg.: `C:\`) to assert that is absolute, but in this mem implementation -// any path starting by `separator` is also considered absolute. -func isAbs(path string) bool { - return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator)) -} - -func (fs *Memory) Stat(filename string) (os.FileInfo, error) { - f, has := fs.s.Get(filename) - if !has { - return nil, os.ErrNotExist - } - - fi, _ := f.Stat() - - var err error - if target, isLink := fs.resolveLink(filename, f); isLink { - fi, err = fs.Stat(target) - if err != nil { - return nil, err - } - } - - // the name of the file should always the name of the stated file, so we - // overwrite the Stat returned from the storage with it, since the - // filename may belong to a link. 
- fi.(*fileInfo).name = filepath.Base(filename) - return fi, nil -} - -func (fs *Memory) Lstat(filename string) (os.FileInfo, error) { - f, has := fs.s.Get(filename) - if !has { - return nil, os.ErrNotExist - } - - return f.Stat() -} - -type ByName []os.FileInfo - -func (a ByName) Len() int { return len(a) } -func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } -func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) { - if f, has := fs.s.Get(path); has { - if target, isLink := fs.resolveLink(path, f); isLink { - return fs.ReadDir(target) - } - } - - var entries []os.FileInfo - for _, f := range fs.s.Children(path) { - fi, _ := f.Stat() - entries = append(entries, fi) - } - - sort.Sort(ByName(entries)) - - return entries, nil -} - -func (fs *Memory) MkdirAll(path string, perm os.FileMode) error { - _, err := fs.s.New(path, perm|os.ModeDir, 0) - return err -} - -func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) { - return util.TempFile(fs, dir, prefix) -} - -func (fs *Memory) getTempFilename(dir, prefix string) string { - fs.tempCount++ - filename := fmt.Sprintf("%s_%d_%d", prefix, fs.tempCount, time.Now().UnixNano()) - return fs.Join(dir, filename) -} - -func (fs *Memory) Rename(from, to string) error { - return fs.s.Rename(from, to) -} - -func (fs *Memory) Remove(filename string) error { - return fs.s.Remove(filename) -} - -func (fs *Memory) Join(elem ...string) string { - return filepath.Join(elem...) -} - -func (fs *Memory) Symlink(target, link string) error { - _, err := fs.Stat(link) - if err == nil { - return os.ErrExist - } - - if !os.IsNotExist(err) { - return err - } - - return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink) -} - -func (fs *Memory) Readlink(link string) (string, error) { - f, has := fs.s.Get(link) - if !has { - return "", os.ErrNotExist - } - - if !isSymlink(f.mode) { - return "", &os.PathError{ - Op: "readlink", - Path: link, - Err: fmt.Errorf("not a symlink"), - } - } - - return string(f.content.bytes), nil -} - -// Capabilities implements the Capable interface. 
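The `Capabilities` implementation that follows advertises everything except `LockCapability`, which callers can probe with the helpers from fs.go. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5"
	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()

	// memfs supports everything except file locking, so callers that
	// need locking can detect the gap up front instead of failing later.
	fmt.Println(billy.CapabilityCheck(fs, billy.LockCapability))  // false
	fmt.Println(billy.CapabilityCheck(fs, billy.WriteCapability)) // true
}
```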
-func (fs *Memory) Capabilities() billy.Capability { - return billy.WriteCapability | - billy.ReadCapability | - billy.ReadAndWriteCapability | - billy.SeekCapability | - billy.TruncateCapability -} - -type file struct { - name string - content *content - position int64 - flag int - mode os.FileMode - - isClosed bool -} - -func (f *file) Name() string { - return f.name -} - -func (f *file) Read(b []byte) (int, error) { - n, err := f.ReadAt(b, f.position) - f.position += int64(n) - - if err == io.EOF && n != 0 { - err = nil - } - - return n, err -} - -func (f *file) ReadAt(b []byte, off int64) (int, error) { - if f.isClosed { - return 0, os.ErrClosed - } - - if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) { - return 0, errors.New("read not supported") - } - - n, err := f.content.ReadAt(b, off) - - return n, err -} - -func (f *file) Seek(offset int64, whence int) (int64, error) { - if f.isClosed { - return 0, os.ErrClosed - } - - switch whence { - case io.SeekCurrent: - f.position += offset - case io.SeekStart: - f.position = offset - case io.SeekEnd: - f.position = int64(f.content.Len()) + offset - } - - return f.position, nil -} - -func (f *file) Write(p []byte) (int, error) { - if f.isClosed { - return 0, os.ErrClosed - } - - if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) { - return 0, errors.New("write not supported") - } - - n, err := f.content.WriteAt(p, f.position) - f.position += int64(n) - - return n, err -} - -func (f *file) Close() error { - if f.isClosed { - return os.ErrClosed - } - - f.isClosed = true - return nil -} - -func (f *file) Truncate(size int64) error { - if size < int64(len(f.content.bytes)) { - f.content.bytes = f.content.bytes[:size] - } else if more := int(size) - len(f.content.bytes); more > 0 { - f.content.bytes = append(f.content.bytes, make([]byte, more)...) - } - - return nil -} - -func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File { - new := &file{ - name: filename, - content: f.content, - mode: mode, - flag: flag, - } - - if isAppend(flag) { - new.position = int64(new.content.Len()) - } - - if isTruncate(flag) { - new.content.Truncate() - } - - return new -} - -func (f *file) Stat() (os.FileInfo, error) { - return &fileInfo{ - name: f.Name(), - mode: f.mode, - size: f.content.Len(), - }, nil -} - -// Lock is a no-op in memfs. -func (f *file) Lock() error { - return nil -} - -// Unlock is a no-op in memfs. 
-func (f *file) Unlock() error { - return nil -} - -type fileInfo struct { - name string - size int - mode os.FileMode -} - -func (fi *fileInfo) Name() string { - return fi.name -} - -func (fi *fileInfo) Size() int64 { - return int64(fi.size) -} - -func (fi *fileInfo) Mode() os.FileMode { - return fi.mode -} - -func (*fileInfo) ModTime() time.Time { - return time.Now() -} - -func (fi *fileInfo) IsDir() bool { - return fi.mode.IsDir() -} - -func (*fileInfo) Sys() interface{} { - return nil -} - -func (c *content) Truncate() { - c.bytes = make([]byte, 0) -} - -func (c *content) Len() int { - return len(c.bytes) -} - -func isCreate(flag int) bool { - return flag&os.O_CREATE != 0 -} - -func isExclusive(flag int) bool { - return flag&os.O_EXCL != 0 -} - -func isAppend(flag int) bool { - return flag&os.O_APPEND != 0 -} - -func isTruncate(flag int) bool { - return flag&os.O_TRUNC != 0 -} - -func isReadAndWrite(flag int) bool { - return flag&os.O_RDWR != 0 -} - -func isReadOnly(flag int) bool { - return flag == os.O_RDONLY -} - -func isWriteOnly(flag int) bool { - return flag&os.O_WRONLY != 0 -} - -func isSymlink(m os.FileMode) bool { - return m&os.ModeSymlink != 0 -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/storage.go deleted file mode 100644 index d3ff5a25db6..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/memfs/storage.go +++ /dev/null @@ -1,229 +0,0 @@ -package memfs - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" -) - -type storage struct { - files map[string]*file - children map[string]map[string]*file -} - -func newStorage() *storage { - return &storage{ - files: make(map[string]*file, 0), - children: make(map[string]map[string]*file, 0), - } -} - -func (s *storage) Has(path string) bool { - path = clean(path) - - _, ok := s.files[path] - return ok -} - -func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) { - path = clean(path) - if s.Has(path) { - if !s.MustGet(path).mode.IsDir() { - return nil, fmt.Errorf("file already exists %q", path) - } - - return nil, nil - } - - name := filepath.Base(path) - - f := &file{ - name: name, - content: &content{name: name}, - mode: mode, - flag: flag, - } - - s.files[path] = f - s.createParent(path, mode, f) - return f, nil -} - -func (s *storage) createParent(path string, mode os.FileMode, f *file) error { - base := filepath.Dir(path) - base = clean(base) - if f.Name() == string(separator) { - return nil - } - - if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil { - return err - } - - if _, ok := s.children[base]; !ok { - s.children[base] = make(map[string]*file, 0) - } - - s.children[base][f.Name()] = f - return nil -} - -func (s *storage) Children(path string) []*file { - path = clean(path) - - l := make([]*file, 0) - for _, f := range s.children[path] { - l = append(l, f) - } - - return l -} - -func (s *storage) MustGet(path string) *file { - f, ok := s.Get(path) - if !ok { - panic(fmt.Errorf("couldn't find %q", path)) - } - - return f -} - -func (s *storage) Get(path string) (*file, bool) { - path = clean(path) - if !s.Has(path) { - return nil, false - } - - file, ok := s.files[path] - return file, ok -} - -func (s *storage) Rename(from, to string) error { - from = clean(from) - to = clean(to) - - if !s.Has(from) { - return os.ErrNotExist - } - - move := [][2]string{{from, to}} - - for pathFrom := range s.files { - if pathFrom == from || !filepath.HasPrefix(pathFrom, from) { - continue - } - - 
rel, _ := filepath.Rel(from, pathFrom) - pathTo := filepath.Join(to, rel) - - move = append(move, [2]string{pathFrom, pathTo}) - } - - for _, ops := range move { - from := ops[0] - to := ops[1] - - if err := s.move(from, to); err != nil { - return err - } - } - - return nil -} - -func (s *storage) move(from, to string) error { - s.files[to] = s.files[from] - s.files[to].name = filepath.Base(to) - s.children[to] = s.children[from] - - defer func() { - delete(s.children, from) - delete(s.files, from) - delete(s.children[filepath.Dir(from)], filepath.Base(from)) - }() - - return s.createParent(to, 0644, s.files[to]) -} - -func (s *storage) Remove(path string) error { - path = clean(path) - - f, has := s.Get(path) - if !has { - return os.ErrNotExist - } - - if f.mode.IsDir() && len(s.children[path]) != 0 { - return fmt.Errorf("dir: %s contains files", path) - } - - base, file := filepath.Split(path) - base = filepath.Clean(base) - - delete(s.children[base], file) - delete(s.files, path) - return nil -} - -func clean(path string) string { - return filepath.Clean(filepath.FromSlash(path)) -} - -type content struct { - name string - bytes []byte -} - -func (c *content) WriteAt(p []byte, off int64) (int, error) { - if off < 0 { - return 0, &os.PathError{ - Op: "writeat", - Path: c.name, - Err: errors.New("negative offset"), - } - } - - prev := len(c.bytes) - - diff := int(off) - prev - if diff > 0 { - c.bytes = append(c.bytes, make([]byte, diff)...) - } - - c.bytes = append(c.bytes[:off], p...) - if len(c.bytes) < prev { - c.bytes = c.bytes[:prev] - } - - return len(p), nil -} - -func (c *content) ReadAt(b []byte, off int64) (n int, err error) { - if off < 0 { - return 0, &os.PathError{ - Op: "readat", - Path: c.name, - Err: errors.New("negative offset"), - } - } - - size := int64(len(c.bytes)) - if off >= size { - return 0, io.EOF - } - - l := int64(len(b)) - if off+l > size { - l = size - off - } - - btr := c.bytes[off : off+l] - if len(btr) < len(b) { - err = io.EOF - } - n = copy(b, btr) - - return -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os.go deleted file mode 100644 index 9665d2755d3..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os.go +++ /dev/null @@ -1,144 +0,0 @@ -// +build !js - -// Package osfs provides a billy filesystem for the OS. -package osfs // import "github.com/go-git/go-billy/v5/osfs" - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/chroot" -) - -const ( - defaultDirectoryMode = 0755 - defaultCreateMode = 0666 -) - -// Default Filesystem representing the root of the os filesystem. -var Default = &OS{} - -// OS is a filesystem based on the os filesystem. -type OS struct{} - -// New returns a new OS filesystem. -func New(baseDir string) billy.Filesystem { - return chroot.New(Default, baseDir) -} - -func (fs *OS) Create(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) -} - -func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { - if flag&os.O_CREATE != 0 { - if err := fs.createDir(filename); err != nil { - return nil, err - } - } - - f, err := os.OpenFile(filename, flag, perm) - if err != nil { - return nil, err - } - return &file{File: f}, err -} - -func (fs *OS) createDir(fullpath string) error { - dir := filepath.Dir(fullpath) - if dir != "." 
{ - if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { - return err - } - } - - return nil -} - -func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) { - l, err := ioutil.ReadDir(path) - if err != nil { - return nil, err - } - - var s = make([]os.FileInfo, len(l)) - for i, f := range l { - s[i] = f - } - - return s, nil -} - -func (fs *OS) Rename(from, to string) error { - if err := fs.createDir(to); err != nil { - return err - } - - return rename(from, to) -} - -func (fs *OS) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, defaultDirectoryMode) -} - -func (fs *OS) Open(filename string) (billy.File, error) { - return fs.OpenFile(filename, os.O_RDONLY, 0) -} - -func (fs *OS) Stat(filename string) (os.FileInfo, error) { - return os.Stat(filename) -} - -func (fs *OS) Remove(filename string) error { - return os.Remove(filename) -} - -func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { - if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { - return nil, err - } - - f, err := ioutil.TempFile(dir, prefix) - if err != nil { - return nil, err - } - return &file{File: f}, nil -} - -func (fs *OS) Join(elem ...string) string { - return filepath.Join(elem...) -} - -func (fs *OS) RemoveAll(path string) error { - return os.RemoveAll(filepath.Clean(path)) -} - -func (fs *OS) Lstat(filename string) (os.FileInfo, error) { - return os.Lstat(filepath.Clean(filename)) -} - -func (fs *OS) Symlink(target, link string) error { - if err := fs.createDir(link); err != nil { - return err - } - - return os.Symlink(target, link) -} - -func (fs *OS) Readlink(link string) (string, error) { - return os.Readlink(link) -} - -// Capabilities implements the Capable interface. -func (fs *OS) Capabilities() billy.Capability { - return billy.DefaultCapabilities -} - -// file is a wrapper for an os.File which adds support for file locking. -type file struct { - *os.File - m sync.Mutex -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go deleted file mode 100644 index 8ae68fed680..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build js - -package osfs - -import ( - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/helper/chroot" - "github.com/go-git/go-billy/v5/memfs" -) - -// globalMemFs is the global memory fs -var globalMemFs = memfs.New() - -// Default Filesystem representing the root of in-memory filesystem for a -// js/wasm environment. -var Default = memfs.New() - -// New returns a new OS filesystem. -func New(baseDir string) billy.Filesystem { - return chroot.New(Default, Default.Join("/", baseDir)) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go deleted file mode 100644 index e8f519ffe22..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go +++ /dev/null @@ -1,85 +0,0 @@ -// +build plan9 - -package osfs - -import ( - "io" - "os" - "path/filepath" - "syscall" -) - -func (f *file) Lock() error { - // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. - // - // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open - // for I/O by only one fid at a time across all clients of the server. If a - // second open is attempted, it draws an error.” - // - // There is no obvious way to implement this function using the exclusive use bit. 
- // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go - // for how file locking is done by the go tool on Plan 9. - return nil -} - -func (f *file) Unlock() error { - return nil -} - -func rename(from, to string) error { - // If from and to are in different directories, copy the file - // since Plan 9 does not support cross-directory rename. - if filepath.Dir(from) != filepath.Dir(to) { - fi, err := os.Stat(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - if fi.Mode().IsDir() { - return &os.LinkError{"rename", from, to, syscall.EISDIR} - } - fromFile, err := os.Open(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - _, err = io.Copy(toFile, fromFile) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - - // Copy mtime and mode from original file. - // We need only one syscall if we avoid os.Chmod and os.Chtimes. - dir := fi.Sys().(*syscall.Dir) - var d syscall.Dir - d.Null() - d.Mtime = dir.Mtime - d.Mode = dir.Mode - if err = dirwstat(to, &d); err != nil { - return &os.LinkError{"rename", from, to, err} - } - - // Remove original file. - err = os.Remove(from) - if err != nil { - return &os.LinkError{"rename", from, to, err} - } - return nil - } - return os.Rename(from, to) -} - -func dirwstat(name string, d *syscall.Dir) error { - var buf [syscall.STATFIXLEN]byte - - n, err := d.Marshal(buf[:]) - if err != nil { - return &os.PathError{"dirwstat", name, err} - } - if err = syscall.Wstat(name, buf[:n]); err != nil { - return &os.PathError{"dirwstat", name, err} - } - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go deleted file mode 100644 index c74d60ee6b5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !plan9,!windows,!js - -package osfs - -import ( - "os" - - "golang.org/x/sys/unix" -) - -func (f *file) Lock() error { - f.m.Lock() - defer f.m.Unlock() - - return unix.Flock(int(f.File.Fd()), unix.LOCK_EX) -} - -func (f *file) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - return unix.Flock(int(f.File.Fd()), unix.LOCK_UN) -} - -func rename(from, to string) error { - return os.Rename(from, to) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go deleted file mode 100644 index 8f5caeb0edd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -package osfs - -import ( - "os" - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -type fileInfo struct { - os.FileInfo - name string -} - -func (fi *fileInfo) Name() string { - return fi.name -} - -var ( - kernel32DLL = windows.NewLazySystemDLL("kernel32.dll") - lockFileExProc = kernel32DLL.NewProc("LockFileEx") - unlockFileProc = kernel32DLL.NewProc("UnlockFile") -) - -const ( - lockfileExclusiveLock = 0x2 -) - -func (f *file) Lock() error { - f.m.Lock() - defer f.m.Unlock() - - var overlapped windows.Overlapped - // err is always non-nil as per sys/windows semantics. 
- ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0, - uintptr(unsafe.Pointer(&overlapped))) - runtime.KeepAlive(&overlapped) - if ret == 0 { - return err - } - return nil -} - -func (f *file) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // err is always non-nil as per sys/windows semantics. - ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0) - if ret == 0 { - return err - } - return nil -} - -func rename(from, to string) error { - return os.Rename(from, to) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/glob.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/glob.go deleted file mode 100644 index f7cb1de8966..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/glob.go +++ /dev/null @@ -1,111 +0,0 @@ -package util - -import ( - "path/filepath" - "sort" - "strings" - - "github.com/go-git/go-billy/v5" -) - -// Glob returns the names of all files matching pattern or nil -// if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). -// -// Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern -// is malformed. -// -// Function originally from https://golang.org/src/path/filepath/match_test.go -func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) { - if !hasMeta(pattern) { - if _, err = fs.Lstat(pattern); err != nil { - return nil, nil - } - return []string{pattern}, nil - } - - dir, file := filepath.Split(pattern) - // Prevent infinite recursion. See issue 15879. - if dir == pattern { - return nil, filepath.ErrBadPattern - } - - var m []string - m, err = Glob(fs, cleanGlobPath(dir)) - if err != nil { - return - } - for _, d := range m { - matches, err = glob(fs, d, file, matches) - if err != nil { - return - } - } - return -} - -// cleanGlobPath prepares path for glob matching. -func cleanGlobPath(path string) string { - switch path { - case "": - return "." - case string(filepath.Separator): - // do nothing to the path - return path - default: - return path[0 : len(path)-1] // chop off trailing separator - } -} - -// glob searches for files matching pattern in the directory dir -// and appends them to matches. If the directory cannot be -// opened, it returns the existing matches. New matches are -// added in lexicographical order. -func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) { - m = matches - fi, err := fs.Stat(dir) - if err != nil { - return - } - - if !fi.IsDir() { - return - } - - names, _ := readdirnames(fs, dir) - sort.Strings(names) - - for _, n := range names { - matched, err := filepath.Match(pattern, n) - if err != nil { - return m, err - } - if matched { - m = append(m, filepath.Join(dir, n)) - } - } - return -} - -// hasMeta reports whether path contains any of the magic characters -// recognized by Match. -func hasMeta(path string) bool { - // TODO(niemeyer): Should other magic characters be added here? 
- return strings.ContainsAny(path, "*?[") -} - -func readdirnames(fs billy.Filesystem, dir string) ([]string, error) { - files, err := fs.ReadDir(dir) - if err != nil { - return nil, err - } - - var names []string - for _, file := range files { - names = append(names, file.Name()) - } - - return names, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/util.go b/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/util.go deleted file mode 100644 index 5c77128c3ca..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-billy/v5/util/util.go +++ /dev/null @@ -1,282 +0,0 @@ -package util - -import ( - "io" - "os" - "path/filepath" - "strconv" - "sync" - "time" - - "github.com/go-git/go-billy/v5" -) - -// RemoveAll removes path and any children it contains. It removes everything it -// can but returns the first error it encounters. If the path does not exist, -// RemoveAll returns nil (no error). -func RemoveAll(fs billy.Basic, path string) error { - fs, path = getUnderlyingAndPath(fs, path) - - if r, ok := fs.(removerAll); ok { - return r.RemoveAll(path) - } - - return removeAll(fs, path) -} - -type removerAll interface { - RemoveAll(string) error -} - -func removeAll(fs billy.Basic, path string) error { - // This implementation is adapted from os.RemoveAll. - - // Simple case: if Remove works, we're done. - err := fs.Remove(path) - if err == nil || os.IsNotExist(err) { - return nil - } - - // Otherwise, is this a directory we need to recurse into? - dir, serr := fs.Stat(path) - if serr != nil { - if os.IsNotExist(serr) { - return nil - } - - return serr - } - - if !dir.IsDir() { - // Not a directory; return the error from Remove. - return err - } - - dirfs, ok := fs.(billy.Dir) - if !ok { - return billy.ErrNotSupported - } - - // Directory. - fis, err := dirfs.ReadDir(path) - if err != nil { - if os.IsNotExist(err) { - // Race. It was deleted between the Lstat and Open. - // Return nil per RemoveAll's docs. - return nil - } - - return err - } - - // Remove contents & return first error. - err = nil - for _, fi := range fis { - cpath := fs.Join(path, fi.Name()) - err1 := removeAll(fs, cpath) - if err == nil { - err = err1 - } - } - - // Remove directory. - err1 := fs.Remove(path) - if err1 == nil || os.IsNotExist(err1) { - return nil - } - - if err == nil { - err = err1 - } - - return err - -} - -// WriteFile writes data to a file named by filename in the given filesystem. -// If the file does not exist, WriteFile creates it with permissions perm; -// otherwise WriteFile truncates it before writing. -func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { - f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - - if err1 := f.Close(); err == nil { - err = err1 - } - - return err -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. 
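Given the collision-retry scheme described above, typical use of `util.TempFile` looks roughly like this sketch (the `odo-` prefix is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()

	// An empty dir falls back to the default temporary directory; the
	// random suffix keeps concurrent callers from colliding.
	f, err := util.TempFile(fs, "", "odo-")
	if err != nil {
		panic(err)
	}
	defer fs.Remove(f.Name()) // caller owns cleanup

	fmt.Println(f.Name()) // generated path ending in the random suffix
	f.Close()
}
```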
-var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFile creates a new temporary file in the directory dir with a name -// beginning with prefix, opens the file for reading and writing, and returns -// the resulting *os.File. If dir is the empty string, TempFile uses the default -// directory for temporary files (see os.TempDir). Multiple programs calling -// TempFile simultaneously will not choose the same file. The caller can use -// f.Name() to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { - // This implementation is based on stdlib ioutil.TempFile. - if dir == "" { - dir = getTempDir(fs) - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} - -// TempDir creates a new temporary directory in the directory dir -// with a name beginning with prefix and returns the path of the -// new directory. If dir is the empty string, TempDir uses the -// default directory for temporary files (see os.TempDir). -// Multiple programs calling TempDir simultaneously -// will not choose the same directory. It is the caller's responsibility -// to remove the directory when no longer needed. -func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { - // This implementation is based on stdlib ioutil.TempDir - - if dir == "" { - dir = getTempDir(fs.(billy.Basic)) - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - try := filepath.Join(dir, prefix+nextSuffix()) - err = fs.MkdirAll(try, 0700) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - if os.IsNotExist(err) { - if _, err := os.Stat(dir); os.IsNotExist(err) { - return "", err - } - } - if err == nil { - name = try - } - break - } - return -} - -func getTempDir(fs billy.Basic) string { - ch, ok := fs.(billy.Chroot) - if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) { - return os.TempDir() - } - - return ".tmp" -} - -type underlying interface { - Underlying() billy.Basic -} - -func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { - u, ok := fs.(underlying) - if !ok { - return fs, path - } - if ch, ok := fs.(billy.Chroot); ok { - path = fs.Join(ch.Root(), path) - } - - return u.Underlying(), path -} - -// ReadFile reads the named file and returns the contents from the given filesystem. -// A successful call returns err == nil, not err == EOF. -// Because ReadFile reads the whole file, it does not treat an EOF from Read -// as an error to be reported. 
-func ReadFile(fs billy.Basic, name string) ([]byte, error) { - f, err := fs.Open(name) - if err != nil { - return nil, err - } - - defer f.Close() - - var size int - if info, err := fs.Stat(name); err == nil { - size64 := info.Size() - if int64(int(size64)) == size64 { - size = int(size64) - } - } - - size++ // one byte for final read at EOF - // If a file claims a small size, read at least 512 bytes. - // In particular, files in Linux's /proc claim size 0 but - // then do not work right if read in small pieces, - // so an initial read of 1 byte would not work correctly. - - if size < 512 { - size = 512 - } - - data := make([]byte, 0, size) - for { - if len(data) >= cap(data) { - d := append(data[:cap(data)], 0) - data = d[:len(data)] - } - - n, err := f.Read(data[len(data):cap(data)]) - data = data[:len(data)+n] - - if err != nil { - if err == io.EOF { - err = nil - } - - return data, err - } - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/.gitignore b/ui/wasm/vendor/github.com/go-git/go-git/v5/.gitignore deleted file mode 100644 index 038dd9f1ed5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -coverage.out -*~ -coverage.txt -profile.out diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md b/ui/wasm/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md deleted file mode 100644 index a689fa3c34a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -education, socio-economic status, nationality, personal appearance, race, -religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. 
- -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at conduct@sourced.tech. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/ui/wasm/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md deleted file mode 100644 index 2a72b501e2e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ /dev/null @@ -1,111 +0,0 @@ -Supported Capabilities -====================== - -Here is a non-comprehensive table of git commands and features whose equivalent -is supported by go-git. - -| Feature | Status | Notes | -|---------------------------------------|--------|-------| -| **config** | -| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | -| **getting and creating repositories** | -| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | -| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | -| **basic snapshotting** | -| add | ✔ | Plain add is supported. Any other flags aren't supported | -| status | ✔ | -| commit | ✔ | -| reset | ✔ | -| rm | ✔ | -| mv | ✔ | -| **branching and merging** | -| branch | ✔ | -| checkout | ✔ | Basic usages of checkout are supported. | -| merge | ✖ | -| mergetool | ✖ | -| stash | ✖ | -| tag | ✔ | -| **sharing and updating projects** | -| fetch | ✔ | -| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. 
| -| push | ✔ | -| remote | ✔ | -| submodule | ✔ | -| **inspection and comparison** | -| show | ✔ | -| log | ✔ | -| shortlog | (see log) | -| describe | | -| **patching** | -| apply | ✖ | -| cherry-pick | ✖ | -| diff | ✔ | Patch object with UnifiedDiff output representation | -| rebase | ✖ | -| revert | ✖ | -| **debugging** | -| bisect | ✖ | -| blame | ✔ | -| grep | ✔ | -| **email** || -| am | ✖ | -| apply | ✖ | -| format-patch | ✖ | -| send-email | ✖ | -| request-pull | ✖ | -| **external systems** | -| svn | ✖ | -| fast-import | ✖ | -| **administration** | -| clean | ✔ | -| gc | ✖ | -| fsck | ✖ | -| reflog | ✖ | -| filter-branch | ✖ | -| instaweb | ✖ | -| archive | ✖ | -| bundle | ✖ | -| prune | ✖ | -| repack | ✖ | -| **server admin** | -| daemon | | -| update-server-info | | -| **advanced** | -| notes | ✖ | -| replace | ✖ | -| worktree | ✖ | -| annotate | (see blame) | -| **gpg** | -| git-verify-commit | ✔ | -| git-verify-tag | ✔ | -| **plumbing commands** | -| cat-file | ✔ | -| check-ignore | | -| commit-tree | | -| count-objects | | -| diff-index | | -| for-each-ref | ✔ | -| hash-object | ✔ | -| ls-files | ✔ | -| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | -| read-tree | | -| rev-list | ✔ | -| rev-parse | | -| show-ref | ✔ | -| symbolic-ref | ✔ | -| update-index | | -| update-ref | | -| verify-pack | | -| write-tree | | -| **protocols** | -| http(s):// (dumb) | ✖ | -| http(s):// (smart) | ✔ | -| git:// | ✔ | -| ssh:// | ✔ | -| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | -| custom | ✔ | -| **other features** | -| gitignore | ✔ | -| gitattributes | ✖ | -| index version | | -| packfile version | | -| push-certs | ✖ | diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/ui/wasm/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md deleted file mode 100644 index fce25328a7f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributing Guidelines - -source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts -contributions via GitHub pull requests. This document outlines some of the -conventions on development workflow, commit message formatting, contact points, -and other resources to make it easier to get your contribution accepted. - -## Support Channels - -The official support channels, for both users and contributors, are: - -- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. -- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. - -*Before opening a new issue or submitting a new pull request, it's helpful to -search the project - it's likely that another user has already reported the -issue you're facing, or it's a known issue that we're already aware of. - - -## How to Contribute - -Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. -In order for a PR to be accepted it needs to pass a list of requirements: - -- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation. -- The expected behavior must match the [official git implementation](https://github.com/git/git). 
-- The actual behavior must be correctly explained with natural language and by providing a minimum working example in Go that reproduces it.
-- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
-- They should in general include tests, and those shall pass.
-- If the PR is a bug fix, it has to include a suite of unit tests for the new functionality.
-- If the PR is a new feature, it has to come with a suite of unit tests that tests the new functionality.
-- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
-
-### Format of the commit message
-
-Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
-
-```
-plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
-```
-
-The format can be described more formally as follows:
-
-```
-<package>: <subpackage>, <what changed>. [Fixes #<issue-number>]
-```
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/LICENSE b/ui/wasm/vendor/github.com/go-git/go-git/v5/LICENSE
deleted file mode 100644
index 8aa3d854cf7..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2018 Sourced Technologies, S.L. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/Makefile b/ui/wasm/vendor/github.com/go-git/go-git/v5/Makefile deleted file mode 100644 index d10922fb10c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/Makefile +++ /dev/null @@ -1,38 +0,0 @@ -# General -WORKDIR = $(PWD) - -# Go parameters -GOCMD = go -GOTEST = $(GOCMD) test - -# Git config -GIT_VERSION ?= -GIT_DIST_PATH ?= $(PWD)/.git-dist -GIT_REPOSITORY = http://github.com/git/git.git - -# Coverage -COVERAGE_REPORT = coverage.out -COVERAGE_MODE = count - -build-git: - @if [ -f $(GIT_DIST_PATH)/git ]; then \ - echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ - else \ - git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ - cd $(GIT_DIST_PATH); \ - make configure; \ - ./configure; \ - make all; \ - fi - -test: - @echo "running against `git version`"; \ - $(GOTEST) ./... 
-
-test-coverage:
-	@echo "running against `git version`"; \
-	echo "" > $(COVERAGE_REPORT); \
-	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...
-
-clean:
-	rm -rf $(GIT_DIST_PATH)
\ No newline at end of file
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/README.md b/ui/wasm/vendor/github.com/go-git/go-git/v5/README.md
deleted file mode 100644
index ff0c9b72bae..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png)
-[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git)
-
-*go-git* is a highly extensible git implementation library written in **pure Go**.
-
-It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface.
-
-It has been actively developed since 2015 and is used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) and [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), and by many other libraries and tools.
-
-Project Status
---------------
-
-After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of updates for four months and the requirement to make a hard fork, the project is **now back to normality**.
-
-The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale.
-
-
-Comparison with git
--------------------
-
-*go-git* aims to be fully compatible with [git](https://github.com/git/git); all the *porcelain* operations are implemented to work exactly as *git* does.
-
-*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md).
-
-
-Installation
-------------
-
-The recommended way to install *go-git* is:
-
-```go
-import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
-import "github.com/go-git/go-git" // with go modules disabled
-```
-
-
-Examples
---------
-
-> Please note that the `CheckIfError` and `Info` functions used in the examples come from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19) and exist only to keep the examples short.
-
-
-### Basic example
-
-A basic example that mimics the standard `git clone` command
-
-```go
-// Clone the given repository to the given directory
-Info("git clone https://github.com/go-git/go-git")
-
-_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{
-	URL:      "https://github.com/go-git/go-git",
-	Progress: os.Stdout,
-})
-
-CheckIfError(err)
-```
-
-Outputs:
-```
-Counting objects: 4924, done.
-Compressing objects: 100% (1333/1333), done.
-Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533
-```
-
-### In-memory example
-
-Cloning a repository into memory and printing the history of HEAD, just like `git log` does
-
-
-```go
-// Clones the given repository in memory, creating the remote, the local
-// branches and fetching the objects, exactly as:
-Info("git clone https://github.com/go-git/go-billy")
-
-r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
-	URL: "https://github.com/go-git/go-billy",
-})
-
-CheckIfError(err)
-
-// Gets the commit history from HEAD, just like this command:
-Info("git log")
-
-// ... retrieves the branch pointed to by HEAD
-ref, err := r.Head()
-CheckIfError(err)
-
-
-// ... retrieves the commit history
-cIter, err := r.Log(&git.LogOptions{From: ref.Hash()})
-CheckIfError(err)
-
-// ... just iterates over the commits, printing them
-err = cIter.ForEach(func(c *object.Commit) error {
-	fmt.Println(c)
-	return nil
-})
-CheckIfError(err)
-```
-
-Outputs:
-```
-commit ded8054fd0c3994453e9c8aacaf48d118d42991e
-Author: Santiago M. Mola
-Date:   Sat Nov 12 21:18:41 2016 +0100
-
-    index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9)
-
-commit df707095626f384ce2dc1a83b30f9a21d69b9dfc
-Author: Santiago M. Mola
-Date:   Fri Nov 11 13:23:22 2016 +0100
-
-    readwriter: fix bug when writing index. (#10)
-
-    When using ReadWriter on an existing siva file, absolute offset for
-    index entries was not being calculated correctly.
-...
-```
-
-You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder.
-
-Contribute
-----------
-
-[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome; if you are interested, please take a look at
-our [Contributing Guidelines](CONTRIBUTING.md).
-
-License
--------
-Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/blame.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/blame.go
deleted file mode 100644
index 43634b32ca6..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/blame.go
+++ /dev/null
@@ -1,302 +0,0 @@
-package git
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"strconv"
-	"strings"
-	"time"
-	"unicode/utf8"
-
-	"github.com/go-git/go-git/v5/plumbing"
-	"github.com/go-git/go-git/v5/plumbing/object"
-	"github.com/go-git/go-git/v5/utils/diff"
-)
-
-// BlameResult represents the result of a Blame operation.
-type BlameResult struct {
-	// Path is the path of the File that we're blaming.
-	Path string
-	// Rev (Revision) is the hash of the specified Commit used to generate this result.
-	Rev plumbing.Hash
-	// Lines contains every line with its authorship.
-	Lines []*Line
-}
-
-// Blame returns a BlameResult with the information about the last author of
-// each line from file `path` at commit `c`.
-func Blame(c *object.Commit, path string) (*BlameResult, error) {
-	// The file to blame is identified by the input arguments:
-	// commit and path. commit is a Commit object obtained from a Repository. Path
-	// represents a path to a specific file contained in the repository.
-	//
-	// Blaming a file is a two step process:
-	//
-	// 1. Create a linear history of the commits affecting a file. We use
-	// revlist.New for that.
-	//
-	// 2. Then build a graph with a node for every line in every file in
-	// the history of the file.
-	//
-	// Each node is assigned a commit: start with the nodes in the first
-	// commit and assign that commit as the creator of all its lines.
-	//
-	// Then jump to the nodes in the next commit, and calculate the diff
-	// between the two files. Newly created lines get
-	// assigned the new commit as their origin. Modified lines also get
-	// this new commit. Untouched lines retain the old commit.
-	//
-	// All this work is done in the assignOrigin function which holds all
-	// the internal relevant data in a "blame" struct, that is not
-	// exported.
-	//
-	// TODO: ways to improve the efficiency of this function:
-	// 1. Improve revlist
-	// 2. Improve how to traverse the history (for example, a backward
-	// traversal would be much more efficient)
-	//
-	// TODO: ways to improve the function in general:
-	// 1. Add memoization between revlist and assign.
-	// 2. It is using much more memory than needed, see the TODOs below.
-
-	b := new(blame)
-	b.fRev = c
-	b.path = path
-
-	// get all the file revisions
-	if err := b.fillRevs(); err != nil {
-		return nil, err
-	}
-
-	// calculate the line tracking graph and fill in
-	// file contents in data.
-	if err := b.fillGraphAndData(); err != nil {
-		return nil, err
-	}
-
-	file, err := b.fRev.File(b.path)
-	if err != nil {
-		return nil, err
-	}
-	finalLines, err := file.Lines()
-	if err != nil {
-		return nil, err
-	}
-
-	// Each node (line) holds the commit where it was introduced or
-	// last modified. To achieve that we use the FORWARD algorithm
-	// described in Zimmermann, et al. "Mining Version Archives for
-	// Co-changed Lines", in proceedings of the Mining Software
-	// Repositories workshop, Shanghai, May 22-23, 2006.
-	lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1))
-	if err != nil {
-		return nil, err
-	}
-
-	return &BlameResult{
-		Path:  path,
-		Rev:   c.Hash,
-		Lines: lines,
-	}, nil
-}
-
-// Line values represent the contents and author of a line in BlameResult values.
-type Line struct {
-	// Author is the email address of the last author that modified the line.
-	Author string
-	// Text is the original text of the line.
-	Text string
-	// Date is when the original text of the line was introduced
-	Date time.Time
-	// Hash is the commit hash that introduced the original line
-	Hash plumbing.Hash
-}
-
-func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line {
-	return &Line{
-		Author: author,
-		Text:   text,
-		Hash:   hash,
-		Date:   date,
-	}
-}
-
-func newLines(contents []string, commits []*object.Commit) ([]*Line, error) {
-	lcontents := len(contents)
-	lcommits := len(commits)
-
-	if lcontents != lcommits {
-		if lcontents == lcommits-1 && contents[lcontents-1] != "\n" {
-			contents = append(contents, "\n")
-		} else {
-			return nil, errors.New("contents and commits have different length")
-		}
-	}
-
-	result := make([]*Line, 0, lcontents)
-	for i := range contents {
-		result = append(result, newLine(
-			commits[i].Author.Email, contents[i],
-			commits[i].Author.When, commits[i].Hash,
-		))
-	}
-
-	return result, nil
-}
-
-// this struct is internally used by the blame function to hold its
-// inputs, outputs and state.
-type blame struct {
-	// the path of the file to blame
-	path string
-	// the commit of the final revision of the file to blame
-	fRev *object.Commit
-	// the chain of revisions affecting the file to blame
-	revs []*object.Commit
-	// the contents of the file across all its revisions
-	data []string
-	// the graph of the lines in the file across all the revisions
-	graph [][]*object.Commit
-}
-
-// calculate the history of a file "path", starting from commit "from", sorted by commit date.
-func (b *blame) fillRevs() error {
-	var err error
-
-	b.revs, err = references(b.fRev, b.path)
-	return err
-}
-
-// build graph of a file from its revision history
-func (b *blame) fillGraphAndData() error {
-	// TODO: not all commits are needed, only the current rev and the prev
-	b.graph = make([][]*object.Commit, len(b.revs))
-	b.data = make([]string, len(b.revs)) // file contents in all the revisions
-	// for every revision of the file, starting with the first
-	// one...
-	for i, rev := range b.revs {
-		// get the contents of the file
-		file, err := rev.File(b.path)
-		if err != nil {
-			return err
-		}
-		b.data[i], err = file.Contents()
-		if err != nil {
-			return err
-		}
-		nLines := countLines(b.data[i])
-		// create a node for each line
-		b.graph[i] = make([]*object.Commit, nLines)
-		// assign a commit to each node
-		// if this is the first revision, then the node is assigned to
-		// this first commit.
-		if i == 0 {
-			for j := 0; j < nLines; j++ {
-				b.graph[i][j] = b.revs[i]
-			}
-		} else {
-			// if this is not the first commit, then assign to the old
-			// commit or to the new one, depending on what the diff
-			// says.
-			b.assignOrigin(i, i-1)
-		}
-	}
-	return nil
-}
-
-// sliceGraph returns a slice of commits (one per line) for a particular
-// revision of a file (0=first revision).
-func (b *blame) sliceGraph(i int) []*object.Commit {
-	fVs := b.graph[i]
-	result := make([]*object.Commit, 0, len(fVs))
-	for _, v := range fVs {
-		c := *v
-		result = append(result, &c)
-	}
-	return result
-}
-
-// Assigns origin to vertexes in current (c) rev from data in its previous (p)
-// revision
-func (b *blame) assignOrigin(c, p int) {
-	// assign origin based on diff info
-	hunks := diff.Do(b.data[p], b.data[c])
-	sl := -1 // source line
-	dl := -1 // destination line
-	for h := range hunks {
-		hLines := countLines(hunks[h].Text)
-		for hl := 0; hl < hLines; hl++ {
-			switch {
-			case hunks[h].Type == 0:
-				sl++
-				dl++
-				b.graph[c][dl] = b.graph[p][sl]
-			case hunks[h].Type == 1:
-				dl++
-				b.graph[c][dl] = b.revs[c]
-			case hunks[h].Type == -1:
-				sl++
-			default:
-				panic("unreachable")
-			}
-		}
-	}
-}
-
-// GoString prints the results of a Blame using git-blame's style.
-func (b *blame) GoString() string {
-	var buf bytes.Buffer
-
-	file, err := b.fRev.File(b.path)
-	if err != nil {
-		panic("PrettyPrint: internal error in repo.Data")
-	}
-	contents, err := file.Contents()
-	if err != nil {
-		panic("PrettyPrint: internal error in repo.Data")
-	}
-
-	lines := strings.Split(contents, "\n")
-	// max line number length
-	mlnl := len(strconv.Itoa(len(lines)))
-	// max author length
-	mal := b.maxAuthorLength()
-	format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n",
-		mal, mlnl)
-
-	fVs := b.graph[len(b.graph)-1]
-	for ln, v := range fVs {
-		fmt.Fprintf(&buf, format, v.Hash.String()[:8],
-			prettyPrintAuthor(fVs[ln]), ln+1, lines[ln])
-	}
-	return buf.String()
-}
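The Blame entry point above is driven from a repository handle. A hedged sketch of a call site, combining the function shown here with go-git's PlainOpen, Head and CommitObject (the repository path and file name are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-git/go-git/v5"
)

func main() {
	// Open an existing repository; "." and "README.md" are placeholders.
	repo, err := git.PlainOpen(".")
	if err != nil {
		log.Fatal(err)
	}
	ref, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	commit, err := repo.CommitObject(ref.Hash())
	if err != nil {
		log.Fatal(err)
	}

	// Blame walks the file's history and assigns a commit to every line.
	result, err := git.Blame(commit, "README.md")
	if err != nil {
		log.Fatal(err)
	}
	for i, line := range result.Lines {
		fmt.Printf("%4d %-30s %s\n", i+1, line.Author, line.Hash.String()[:8])
	}
}
```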
-// utility function to pretty print the author.
-func prettyPrintAuthor(c *object.Commit) string {
-	return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02"))
-}
-
-// utility function to calculate the number of runes needed
-// to print the longest author name in the blame of a file.
-func (b *blame) maxAuthorLength() int {
-	memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1)
-	fVs := b.graph[len(b.graph)-1]
-	m := 0
-	for ln := range fVs {
-		if _, ok := memo[fVs[ln].Hash]; ok {
-			continue
-		}
-		memo[fVs[ln].Hash] = struct{}{}
-		m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln])))
-	}
-	return m
-}
-
-func max(a, b int) int {
-	if a > b {
-		return a
-	}
-	return b
-}
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/common.go
deleted file mode 100644
index 6174339a815..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/common.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package git
-
-import "strings"
-
-// countLines returns the number of lines in a string à la git. The newline
-// character is assumed to be '\n'. The empty string contains 0 lines. If the
-// last line of the string doesn't end with a newline, it will still be
-// considered a line.
-func countLines(s string) int {
-	if s == "" {
-		return 0
-	}
-
-	nEOL := strings.Count(s, "\n")
-	if strings.HasSuffix(s, "\n") {
-		return nEOL
-	}
-
-	return nEOL + 1
-}
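countLines follows git's convention that a final fragment without a trailing newline still counts as a line, while the trailing newline itself does not open a new one. A small sketch exercising the edge cases (the function body is copied from the helper above; the expected counts are in the comment):

```go
package main

import (
	"fmt"
	"strings"
)

// countLines reproduces the deleted helper: '\n'-separated, git-style.
func countLines(s string) int {
	if s == "" {
		return 0
	}
	n := strings.Count(s, "\n")
	if strings.HasSuffix(s, "\n") {
		return n
	}
	return n + 1
}

func main() {
	for _, s := range []string{"", "a", "a\n", "a\nb", "a\nb\n"} {
		fmt.Printf("%q -> %d\n", s, countLines(s)) // 0, 1, 1, 2, 2
	}
}
```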
"false" is undocumented and - // typically represented by the non-existence of this field - Rebase string - - raw *format.Subsection -} - -// Validate validates fields of branch -func (b *Branch) Validate() error { - if b.Name == "" { - return errBranchEmptyName - } - - if b.Merge != "" && !b.Merge.IsBranch() { - return errBranchInvalidMerge - } - - if b.Rebase != "" && - b.Rebase != "true" && - b.Rebase != "interactive" && - b.Rebase != "false" { - return errBranchInvalidRebase - } - - return nil -} - -func (b *Branch) marshal() *format.Subsection { - if b.raw == nil { - b.raw = &format.Subsection{} - } - - b.raw.Name = b.Name - - if b.Remote == "" { - b.raw.RemoveOption(remoteSection) - } else { - b.raw.SetOption(remoteSection, b.Remote) - } - - if b.Merge == "" { - b.raw.RemoveOption(mergeKey) - } else { - b.raw.SetOption(mergeKey, string(b.Merge)) - } - - if b.Rebase == "" { - b.raw.RemoveOption(rebaseKey) - } else { - b.raw.SetOption(rebaseKey, b.Rebase) - } - - return b.raw -} - -func (b *Branch) unmarshal(s *format.Subsection) error { - b.raw = s - - b.Name = b.raw.Name - b.Remote = b.raw.Options.Get(remoteSection) - b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) - b.Rebase = b.raw.Options.Get(rebaseKey) - - return b.Validate() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/config.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/config/config.go deleted file mode 100644 index 1aee25a4c5a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/config.go +++ /dev/null @@ -1,659 +0,0 @@ -// Package config contains the abstraction of multiple config files -package config - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strconv" - - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-git/v5/internal/url" - format "github.com/go-git/go-git/v5/plumbing/format/config" - "github.com/mitchellh/go-homedir" -) - -const ( - // DefaultFetchRefSpec is the default refspec used for fetch. - DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" - // DefaultPushRefSpec is the default refspec used for push. - DefaultPushRefSpec = "refs/heads/*:refs/heads/*" -) - -// ConfigStorer generic storage of Config object -type ConfigStorer interface { - Config() (*Config, error) - SetConfig(*Config) error -} - -var ( - ErrInvalid = errors.New("config invalid key in remote or branch") - ErrRemoteConfigNotFound = errors.New("remote config not found") - ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") - ErrRemoteConfigEmptyName = errors.New("remote config: empty name") -) - -// Scope defines the scope of a config file, such as local, global or system. -type Scope int - -// Available ConfigScope's -const ( - LocalScope Scope = iota - GlobalScope - SystemScope -) - -// Config contains the repository configuration -// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES -type Config struct { - Core struct { - // IsBare if true this repository is assumed to be bare and has no - // working directory associated with it. - IsBare bool - // Worktree is the path to the root of the working tree. - Worktree string - // CommentChar is the character indicating the start of a - // comment for commands like commit and tag - CommentChar string - } - - User struct { - // Name is the personal name of the author and the commiter of a commit. - Name string - // Email is the email of the author and the commiter of a commit. 
- Email string - } - - Author struct { - // Name is the personal name of the author of a commit. - Name string - // Email is the email of the author of a commit. - Email string - } - - Committer struct { - // Name is the personal name of the commiter of a commit. - Name string - // Email is the email of the the commiter of a commit. - Email string - } - - Pack struct { - // Window controls the size of the sliding window for delta - // compression. The default is 10. A value of 0 turns off - // delta compression entirely. - Window uint - } - - Init struct { - // DefaultBranch Allows overriding the default branch name - // e.g. when initializing a new repository or when cloning - // an empty repository. - DefaultBranch string - } - - // Remotes list of repository remotes, the key of the map is the name - // of the remote, should equal to RemoteConfig.Name. - Remotes map[string]*RemoteConfig - // Submodules list of repository submodules, the key of the map is the name - // of the submodule, should equal to Submodule.Name. - Submodules map[string]*Submodule - // Branches list of branches, the key is the branch name and should - // equal Branch.Name - Branches map[string]*Branch - // URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the - // key instead. - URLs map[string]*URL - // Raw contains the raw information of a config file. The main goal is - // preserve the parsed information from the original format, to avoid - // dropping unsupported fields. - Raw *format.Config -} - -// NewConfig returns a new empty Config. -func NewConfig() *Config { - config := &Config{ - Remotes: make(map[string]*RemoteConfig), - Submodules: make(map[string]*Submodule), - Branches: make(map[string]*Branch), - URLs: make(map[string]*URL), - Raw: format.New(), - } - - config.Pack.Window = DefaultPackWindow - - return config -} - -// ReadConfig reads a config file from a io.Reader. -func ReadConfig(r io.Reader) (*Config, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - cfg := NewConfig() - if err = cfg.Unmarshal(b); err != nil { - return nil, err - } - - return cfg, nil -} - -// LoadConfig loads a config file from a given scope. The returned Config, -// contains exclusively information fom the given scope. If couldn't find a -// config file to the given scope, a empty one is returned. -func LoadConfig(scope Scope) (*Config, error) { - if scope == LocalScope { - return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.") - } - - files, err := Paths(scope) - if err != nil { - return nil, err - } - - for _, file := range files { - f, err := osfs.Default.Open(file) - if err != nil { - if os.IsNotExist(err) { - continue - } - - return nil, err - } - - defer f.Close() - return ReadConfig(f) - } - - return NewConfig(), nil -} - -// Paths returns the config file location for a given scope. -func Paths(scope Scope) ([]string, error) { - var files []string - switch scope { - case GlobalScope: - xdg := os.Getenv("XDG_CONFIG_HOME") - if xdg != "" { - files = append(files, filepath.Join(xdg, "git/config")) - } - - home, err := homedir.Dir() - if err != nil { - return nil, err - } - - files = append(files, - filepath.Join(home, ".gitconfig"), - filepath.Join(home, ".config/git/config"), - ) - case SystemScope: - files = append(files, "/etc/gitconfig") - } - - return files, nil -} - -// Validate validates the fields and sets the default values. 
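A hedged sketch of how these scopes are consumed, assuming nothing beyond the LoadConfig and Paths behavior shown above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-git/go-git/v5/config"
)

func main() {
	// GlobalScope reads $XDG_CONFIG_HOME/git/config, then ~/.gitconfig,
	// then ~/.config/git/config; if none exist an empty Config is returned.
	cfg, err := config.LoadConfig(config.GlobalScope)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("user:", cfg.User.Name, cfg.User.Email)

	// LocalScope is deliberately rejected by LoadConfig: per-repository
	// configuration must come from the repository's ConfigStorer instead.
	if _, err := config.LoadConfig(config.LocalScope); err != nil {
		fmt.Println("expected:", err)
	}
}
```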
-func (c *Config) Validate() error { - for name, r := range c.Remotes { - if r.Name != name { - return ErrInvalid - } - - if err := r.Validate(); err != nil { - return err - } - } - - for name, b := range c.Branches { - if b.Name != name { - return ErrInvalid - } - - if err := b.Validate(); err != nil { - return err - } - } - - return nil -} - -const ( - remoteSection = "remote" - submoduleSection = "submodule" - branchSection = "branch" - coreSection = "core" - packSection = "pack" - userSection = "user" - authorSection = "author" - committerSection = "committer" - initSection = "init" - urlSection = "url" - fetchKey = "fetch" - urlKey = "url" - bareKey = "bare" - worktreeKey = "worktree" - commentCharKey = "commentChar" - windowKey = "window" - mergeKey = "merge" - rebaseKey = "rebase" - nameKey = "name" - emailKey = "email" - defaultBranchKey = "defaultBranch" - - // DefaultPackWindow holds the number of previous objects used to - // generate deltas. The value 10 is the same used by git command. - DefaultPackWindow = uint(10) -) - -// Unmarshal parses a git-config file and stores it. -func (c *Config) Unmarshal(b []byte) error { - r := bytes.NewBuffer(b) - d := format.NewDecoder(r) - - c.Raw = format.New() - if err := d.Decode(c.Raw); err != nil { - return err - } - - c.unmarshalCore() - c.unmarshalUser() - c.unmarshalInit() - if err := c.unmarshalPack(); err != nil { - return err - } - unmarshalSubmodules(c.Raw, c.Submodules) - - if err := c.unmarshalBranches(); err != nil { - return err - } - - if err := c.unmarshalURLs(); err != nil { - return err - } - - return c.unmarshalRemotes() -} - -func (c *Config) unmarshalCore() { - s := c.Raw.Section(coreSection) - if s.Options.Get(bareKey) == "true" { - c.Core.IsBare = true - } - - c.Core.Worktree = s.Options.Get(worktreeKey) - c.Core.CommentChar = s.Options.Get(commentCharKey) -} - -func (c *Config) unmarshalUser() { - s := c.Raw.Section(userSection) - c.User.Name = s.Options.Get(nameKey) - c.User.Email = s.Options.Get(emailKey) - - s = c.Raw.Section(authorSection) - c.Author.Name = s.Options.Get(nameKey) - c.Author.Email = s.Options.Get(emailKey) - - s = c.Raw.Section(committerSection) - c.Committer.Name = s.Options.Get(nameKey) - c.Committer.Email = s.Options.Get(emailKey) -} - -func (c *Config) unmarshalPack() error { - s := c.Raw.Section(packSection) - window := s.Options.Get(windowKey) - if window == "" { - c.Pack.Window = DefaultPackWindow - } else { - winUint, err := strconv.ParseUint(window, 10, 32) - if err != nil { - return err - } - c.Pack.Window = uint(winUint) - } - return nil -} - -func (c *Config) unmarshalRemotes() error { - s := c.Raw.Section(remoteSection) - for _, sub := range s.Subsections { - r := &RemoteConfig{} - if err := r.unmarshal(sub); err != nil { - return err - } - - c.Remotes[r.Name] = r - } - - // Apply insteadOf url rules - for _, r := range c.Remotes { - r.applyURLRules(c.URLs) - } - - return nil -} - -func (c *Config) unmarshalURLs() error { - s := c.Raw.Section(urlSection) - for _, sub := range s.Subsections { - r := &URL{} - if err := r.unmarshal(sub); err != nil { - return err - } - - c.URLs[r.Name] = r - } - - return nil -} - -func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { - s := fc.Section(submoduleSection) - for _, sub := range s.Subsections { - m := &Submodule{} - m.unmarshal(sub) - - if m.Validate() == ErrModuleBadPath { - continue - } - - submodules[m.Name] = m - } -} - -func (c *Config) unmarshalBranches() error { - bs := c.Raw.Section(branchSection) - for _, 
sub := range bs.Subsections { - b := &Branch{} - - if err := b.unmarshal(sub); err != nil { - return err - } - - c.Branches[b.Name] = b - } - return nil -} - -func (c *Config) unmarshalInit() { - s := c.Raw.Section(initSection) - c.Init.DefaultBranch = s.Options.Get(defaultBranchKey) -} - -// Marshal returns Config encoded as a git-config file. -func (c *Config) Marshal() ([]byte, error) { - c.marshalCore() - c.marshalUser() - c.marshalPack() - c.marshalRemotes() - c.marshalSubmodules() - c.marshalBranches() - c.marshalURLs() - c.marshalInit() - - buf := bytes.NewBuffer(nil) - if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -func (c *Config) marshalCore() { - s := c.Raw.Section(coreSection) - s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) - - if c.Core.Worktree != "" { - s.SetOption(worktreeKey, c.Core.Worktree) - } -} - -func (c *Config) marshalUser() { - s := c.Raw.Section(userSection) - if c.User.Name != "" { - s.SetOption(nameKey, c.User.Name) - } - - if c.User.Email != "" { - s.SetOption(emailKey, c.User.Email) - } - - s = c.Raw.Section(authorSection) - if c.Author.Name != "" { - s.SetOption(nameKey, c.Author.Name) - } - - if c.Author.Email != "" { - s.SetOption(emailKey, c.Author.Email) - } - - s = c.Raw.Section(committerSection) - if c.Committer.Name != "" { - s.SetOption(nameKey, c.Committer.Name) - } - - if c.Committer.Email != "" { - s.SetOption(emailKey, c.Committer.Email) - } -} - -func (c *Config) marshalPack() { - s := c.Raw.Section(packSection) - if c.Pack.Window != DefaultPackWindow { - s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) - } -} - -func (c *Config) marshalRemotes() { - s := c.Raw.Section(remoteSection) - newSubsections := make(format.Subsections, 0, len(c.Remotes)) - added := make(map[string]bool) - for _, subsection := range s.Subsections { - if remote, ok := c.Remotes[subsection.Name]; ok { - newSubsections = append(newSubsections, remote.marshal()) - added[subsection.Name] = true - } - } - - remoteNames := make([]string, 0, len(c.Remotes)) - for name := range c.Remotes { - remoteNames = append(remoteNames, name) - } - - sort.Strings(remoteNames) - - for _, name := range remoteNames { - if !added[name] { - newSubsections = append(newSubsections, c.Remotes[name].marshal()) - } - } - - s.Subsections = newSubsections -} - -func (c *Config) marshalSubmodules() { - s := c.Raw.Section(submoduleSection) - s.Subsections = make(format.Subsections, len(c.Submodules)) - - var i int - for _, r := range c.Submodules { - section := r.marshal() - // the submodule section at config is a subset of the .gitmodule file - // we should remove the non-valid options for the config file. 
-		section.RemoveOption(pathKey)
-		s.Subsections[i] = section
-		i++
-	}
-}
-
-func (c *Config) marshalBranches() {
-	s := c.Raw.Section(branchSection)
-	newSubsections := make(format.Subsections, 0, len(c.Branches))
-	added := make(map[string]bool)
-	for _, subsection := range s.Subsections {
-		if branch, ok := c.Branches[subsection.Name]; ok {
-			newSubsections = append(newSubsections, branch.marshal())
-			added[subsection.Name] = true
-		}
-	}
-
-	branchNames := make([]string, 0, len(c.Branches))
-	for name := range c.Branches {
-		branchNames = append(branchNames, name)
-	}
-
-	sort.Strings(branchNames)
-
-	for _, name := range branchNames {
-		if !added[name] {
-			newSubsections = append(newSubsections, c.Branches[name].marshal())
-		}
-	}
-
-	s.Subsections = newSubsections
-}
-
-func (c *Config) marshalURLs() {
-	s := c.Raw.Section(urlSection)
-	s.Subsections = make(format.Subsections, len(c.URLs))
-
-	var i int
-	for _, r := range c.URLs {
-		section := r.marshal()
-		s.Subsections[i] = section
-		i++
-	}
-}
-
-func (c *Config) marshalInit() {
-	s := c.Raw.Section(initSection)
-	if c.Init.DefaultBranch != "" {
-		s.SetOption(defaultBranchKey, c.Init.DefaultBranch)
-	}
-}
-
-// RemoteConfig contains the configuration for a given remote repository.
-type RemoteConfig struct {
-	// Name of the remote
-	Name string
-	// URLs the URLs of a remote repository. It must be non-empty. Fetch will
-	// always use the first URL, while push will use all of them.
-	URLs []string
-
-	// insteadOfRulesApplied records whether the urls have been modified
-	insteadOfRulesApplied bool
-	// originalURLs are the urls before applying insteadOf rules
-	originalURLs []string
-
-	// Fetch the default set of "refspec" for fetch operation
-	Fetch []RefSpec
-
-	// raw representation of the subsection, filled when marshal or unmarshal
-	// are called
-	raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (c *RemoteConfig) Validate() error {
-	if c.Name == "" {
-		return ErrRemoteConfigEmptyName
-	}
-
-	if len(c.URLs) == 0 {
-		return ErrRemoteConfigEmptyURL
-	}
-
-	for _, r := range c.Fetch {
-		if err := r.Validate(); err != nil {
-			return err
-		}
-	}
-
-	if len(c.Fetch) == 0 {
-		c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))}
-	}
-
-	return nil
-}
-
-func (c *RemoteConfig) unmarshal(s *format.Subsection) error {
-	c.raw = s
-
-	fetch := []RefSpec{}
-	for _, f := range c.raw.Options.GetAll(fetchKey) {
-		rs := RefSpec(f)
-		if err := rs.Validate(); err != nil {
-			return err
-		}
-
-		fetch = append(fetch, rs)
-	}
-
-	c.Name = c.raw.Name
-	c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
-	c.Fetch = fetch
-
-	return nil
-}
-
-func (c *RemoteConfig) marshal() *format.Subsection {
-	if c.raw == nil {
-		c.raw = &format.Subsection{}
-	}
-
-	c.raw.Name = c.Name
-	if len(c.URLs) == 0 {
-		c.raw.RemoveOption(urlKey)
-	} else {
-		urls := c.URLs
-		if c.insteadOfRulesApplied {
-			urls = c.originalURLs
-		}
-
-		c.raw.SetOption(urlKey, urls...)
-	}
-
-	if len(c.Fetch) == 0 {
-		c.raw.RemoveOption(fetchKey)
-	} else {
-		var values []string
-		for _, rs := range c.Fetch {
-			values = append(values, rs.String())
-		}
-
-		c.raw.SetOption(fetchKey, values...)
-	}
-
-	return c.raw
-}
-
-func (c *RemoteConfig) IsFirstURLLocal() bool {
-	return url.IsLocalEndpoint(c.URLs[0])
-}
-
-func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
-	// save original urls
-	originalURLs := make([]string, len(c.URLs))
-	copy(originalURLs, c.URLs)
-
-	for i, url := range c.URLs {
-		if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
-			c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
-			c.insteadOfRulesApplied = true
-		}
-	}
-
-	if c.insteadOfRulesApplied {
-		c.originalURLs = originalURLs
-	}
-}
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/modules.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/config/modules.go
deleted file mode 100644
index 1c10aa354eb..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/modules.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package config
-
-import (
-	"bytes"
-	"errors"
-	"regexp"
-
-	format "github.com/go-git/go-git/v5/plumbing/format/config"
-)
-
-var (
-	ErrModuleEmptyURL  = errors.New("module config: empty URL")
-	ErrModuleEmptyPath = errors.New("module config: empty path")
-	ErrModuleBadPath   = errors.New("submodule has an invalid path")
-)
-
-var (
-	// Matches module paths with dotdot ".." components.
-	dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
-)
-
-// Modules defines the submodules properties, represents a .gitmodules file
-// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html
-type Modules struct {
-	// Submodules is a map of submodules, keyed by the name of the submodule.
-	Submodules map[string]*Submodule
-
-	raw *format.Config
-}
-
-// NewModules returns a new empty Modules
-func NewModules() *Modules {
-	return &Modules{
-		Submodules: make(map[string]*Submodule),
-		raw:        format.New(),
-	}
-}
-
-const (
-	pathKey   = "path"
-	branchKey = "branch"
-)
-
-// Unmarshal parses a git-config file and stores it.
-func (m *Modules) Unmarshal(b []byte) error {
-	r := bytes.NewBuffer(b)
-	d := format.NewDecoder(r)
-
-	m.raw = format.New()
-	if err := d.Decode(m.raw); err != nil {
-		return err
-	}
-
-	unmarshalSubmodules(m.raw, m.Submodules)
-	return nil
-}
-
-// Marshal returns Modules encoded as a git-config file.
-func (m *Modules) Marshal() ([]byte, error) {
-	s := m.raw.Section(submoduleSection)
-	s.Subsections = make(format.Subsections, len(m.Submodules))
-
-	var i int
-	for _, r := range m.Submodules {
-		s.Subsections[i] = r.marshal()
-		i++
-	}
-
-	buf := bytes.NewBuffer(nil)
-	if err := format.NewEncoder(buf).Encode(m.raw); err != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), nil
-}
-
-// Submodule defines a submodule.
-type Submodule struct {
-	// Name module name
-	Name string
-	// Path defines the path, relative to the top-level directory of the Git
-	// working tree.
-	Path string
-	// URL defines a URL from which the submodule repository can be cloned.
-	URL string
-	// Branch is a remote branch name for tracking updates in the upstream
-	// submodule. Optional value.
-	Branch string
-
-	// raw representation of the subsection, filled when marshal or unmarshal
-	// are called.
-	raw *format.Subsection
-}
-
-// Validate validates the fields and sets the default values.
-func (m *Submodule) Validate() error {
-	if m.Path == "" {
-		return ErrModuleEmptyPath
-	}
-
-	if m.URL == "" {
-		return ErrModuleEmptyURL
-	}
-
-	if dotdotPath.MatchString(m.Path) {
-		return ErrModuleBadPath
-	}
-
-	return nil
-}
-
-func (m *Submodule) unmarshal(s *format.Subsection) {
-	m.raw = s
-
-	m.Name = m.raw.Name
-	m.Path = m.raw.Option(pathKey)
-	m.URL = m.raw.Option(urlKey)
-	m.Branch = m.raw.Option(branchKey)
-}
-
-func (m *Submodule) marshal() *format.Subsection {
-	if m.raw == nil {
-		m.raw = &format.Subsection{}
-	}
-
-	m.raw.Name = m.Name
-	if m.raw.Name == "" {
-		m.raw.Name = m.Path
-	}
-
-	m.raw.SetOption(pathKey, m.Path)
-	m.raw.SetOption(urlKey, m.URL)
-
-	if m.Branch != "" {
-		m.raw.SetOption(branchKey, m.Branch)
-	}
-
-	return m.raw
-}
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/refspec.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/config/refspec.go
deleted file mode 100644
index 4bfaa37bbb2..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/refspec.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package config
-
-import (
-	"errors"
-	"strings"
-
-	"github.com/go-git/go-git/v5/plumbing"
-)
-
-const (
-	refSpecWildcard  = "*"
-	refSpecForce     = "+"
-	refSpecSeparator = ":"
-)
-
-var (
-	ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
-	ErrRefSpecMalformedWildcard  = errors.New("malformed refspec, mismatched number of wildcards")
-)
-
-// RefSpec is a mapping from local branches to remote references.
-// The format of the refspec is an optional +, followed by <src>:<dst>, where
-// <src> is the pattern for references on the remote side and <dst> is where
-// those references will be written locally. The + tells Git to update the
-// reference even if it isn’t a fast-forward.
-// eg.: "+refs/heads/*:refs/remotes/origin/*"
-//
-// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec
-type RefSpec string
-
-// Validate validates the RefSpec
-func (s RefSpec) Validate() error {
-	spec := string(s)
-	if strings.Count(spec, refSpecSeparator) != 1 {
-		return ErrRefSpecMalformedSeparator
-	}
-
-	sep := strings.Index(spec, refSpecSeparator)
-	if sep == len(spec)-1 {
-		return ErrRefSpecMalformedSeparator
-	}
-
-	ws := strings.Count(spec[0:sep], refSpecWildcard)
-	wd := strings.Count(spec[sep+1:], refSpecWildcard)
-	if ws == wd && ws < 2 && wd < 2 {
-		return nil
-	}
-
-	return ErrRefSpecMalformedWildcard
-}
-
-// IsForceUpdate returns whether update is allowed in non fast-forward merges.
-func (s RefSpec) IsForceUpdate() bool {
-	return s[0] == refSpecForce[0]
-}
-
-// IsDelete returns true if the refspec indicates a delete (empty src).
-func (s RefSpec) IsDelete() bool {
-	return s[0] == refSpecSeparator[0]
-}
-
-// IsExactSHA1 returns true if the source is a SHA1 hash.
-func (s RefSpec) IsExactSHA1() bool {
-	return plumbing.IsHash(s.Src())
-}
-
-// Src returns the src side.
-func (s RefSpec) Src() string {
-	spec := string(s)
-
-	var start int
-	if s.IsForceUpdate() {
-		start = 1
-	} else {
-		start = 0
-	}
-
-	end := strings.Index(spec, refSpecSeparator)
-	return spec[start:end]
-}
-
-// Match matches the given plumbing.ReferenceName against the source.
-func (s RefSpec) Match(n plumbing.ReferenceName) bool {
-	if !s.IsWildcard() {
-		return s.matchExact(n)
-	}
-
-	return s.matchGlob(n)
-}
-
-// IsWildcard returns true if the RefSpec contains a wildcard.
-func (s RefSpec) IsWildcard() bool { - return strings.Contains(string(s), refSpecWildcard) -} - -func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { - return s.Src() == n.String() -} - -func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { - src := s.Src() - name := n.String() - wildcard := strings.Index(src, refSpecWildcard) - - var prefix, suffix string - prefix = src[0:wildcard] - if len(src) > wildcard+1 { - suffix = src[wildcard+1:] - } - - return len(name) >= len(prefix)+len(suffix) && - strings.HasPrefix(name, prefix) && - strings.HasSuffix(name, suffix) -} - -// Dst returns the destination for the given remote reference. -func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { - spec := string(s) - start := strings.Index(spec, refSpecSeparator) + 1 - dst := spec[start:] - src := s.Src() - - if !s.IsWildcard() { - return plumbing.ReferenceName(dst) - } - - name := n.String() - ws := strings.Index(src, refSpecWildcard) - wd := strings.Index(dst, refSpecWildcard) - match := name[ws : len(name)-(len(src)-(ws+1))] - - return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) -} - -func (s RefSpec) Reverse() RefSpec { - spec := string(s) - separator := strings.Index(spec, refSpecSeparator) - - return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator]) -} - -func (s RefSpec) String() string { - return string(s) -} - -// MatchAny returns true if any of the RefSpec match with the given ReferenceName. -func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { - for _, r := range l { - if r.Match(n) { - return true - } - } - - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/url.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/config/url.go deleted file mode 100644 index 114d6b2662a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/config/url.go +++ /dev/null @@ -1,81 +0,0 @@ -package config - -import ( - "errors" - "strings" - - format "github.com/go-git/go-git/v5/plumbing/format/config" -) - -var ( - errURLEmptyInsteadOf = errors.New("url config: empty insteadOf") -) - -// Url defines Url rewrite rules -type URL struct { - // Name new base url - Name string - // Any URL that starts with this value will be rewritten to start, instead, with <base>. - When more than one insteadOf strings match a given URL, the longest match is used. - InsteadOf string - - // raw representation of the subsection, filled by marshal or unmarshal are - // called.
- raw *format.Subsection -} - -// Validate validates fields of branch -func (b *URL) Validate() error { - if b.InsteadOf == "" { - return errURLEmptyInsteadOf - } - - return nil -} - -const ( - insteadOfKey = "insteadOf" -) - -func (u *URL) unmarshal(s *format.Subsection) error { - u.raw = s - - u.Name = s.Name - u.InsteadOf = u.raw.Option(insteadOfKey) - return nil -} - -func (u *URL) marshal() *format.Subsection { - if u.raw == nil { - u.raw = &format.Subsection{} - } - - u.raw.Name = u.Name - u.raw.SetOption(insteadOfKey, u.InsteadOf) - - return u.raw -} - -func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL { - var longestMatch *URL - for _, u := range urls { - if !strings.HasPrefix(remoteURL, u.InsteadOf) { - continue - } - - // according to spec if there is more than one match, take the longest - if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) { - longestMatch = u - } - } - - return longestMatch -} - -func (u *URL) ApplyInsteadOf(url string) string { - if !strings.HasPrefix(url, u.InsteadOf) { - return url - } - - return u.Name + url[len(u.InsteadOf):] -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/doc.go deleted file mode 100644 index 3d817fe9c8c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// A highly extensible git implementation in pure Go. -// -// go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the -// majority of the plumbing read operations and some of the main write -// operations, but lacks the main porcelain operations such as merges. -// -// It is highly extensible, we have been following the open/close principle in -// its design to facilitate extensions, mainly focusing the efforts on the -// persistence of the objects. -package git diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go deleted file mode 100644 index 8facf17ffb2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go +++ /dev/null @@ -1,622 +0,0 @@ -// Package revision extracts git revision from string -// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html -package revision - -import ( - "bytes" - "fmt" - "io" - "regexp" - "strconv" - "time" -) - -// ErrInvalidRevision is emitted if string doesn't match valid revision -type ErrInvalidRevision struct { - s string -} - -func (e *ErrInvalidRevision) Error() string { - return "Revision invalid : " + e.s -} - -// Revisioner represents a revision component.
-// A revision is made of multiple revision components -// obtained after parsing a revision string, -// for instance revision "master~" will be converted into -// two revision components Ref and TildePath -type Revisioner interface { -} - -// Ref represents a reference name : HEAD, master, <hash> -type Ref string - -// TildePath represents ~, ~{n} -type TildePath struct { - Depth int -} - -// CaretPath represents ^, ^{n} -type CaretPath struct { - Depth int -} - -// CaretReg represents ^{/foo bar} -type CaretReg struct { - Regexp *regexp.Regexp - Negate bool -} - -// CaretType represents ^{commit} -type CaretType struct { - ObjectType string -} - -// AtReflog represents @{n} -type AtReflog struct { - Depth int -} - -// AtCheckout represents @{-n} -type AtCheckout struct { - Depth int -} - -// AtUpstream represents @{upstream}, @{u} -type AtUpstream struct { - BranchName string -} - -// AtPush represents @{push} -type AtPush struct { - BranchName string -} - -// AtDate represents @{"2006-01-02T15:04:05Z"} -type AtDate struct { - Date time.Time -} - -// ColonReg represents :/foo bar -type ColonReg struct { - Regexp *regexp.Regexp - Negate bool -} - -// ColonPath represents :./<path> :<path> -type ColonPath struct { - Path string -} - -// ColonStagePath represents :<n>:/<path> -type ColonStagePath struct { - Path string - Stage int -} - -// Parser represents a parser -// use to tokenize and transform to revisioner chunks -// a given string -type Parser struct { - s *scanner - currentParsedChar struct { - tok token - lit string - } - unreadLastChar bool -} - -// NewParserFromString returns a new instance of parser from a string. -func NewParserFromString(s string) *Parser { - return NewParser(bytes.NewBufferString(s)) -} - -// NewParser returns a new instance of parser. -func NewParser(r io.Reader) *Parser { - return &Parser{s: newScanner(r)} -} - -// scan returns the next token from the underlying scanner -// or the last scanned token if an unscan was requested -func (p *Parser) scan() (token, string, error) { - if p.unreadLastChar { - p.unreadLastChar = false - return p.currentParsedChar.tok, p.currentParsedChar.lit, nil - } - - tok, lit, err := p.s.scan() - - p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit - - return tok, lit, err -} - -// unscan pushes the previously read token back onto the buffer.
-func (p *Parser) unscan() { p.unreadLastChar = true } - -// Parse explodes a revision string into revisioner chunks -func (p *Parser) Parse() ([]Revisioner, error) { - var rev Revisioner - var revs []Revisioner - var tok token - var err error - - for { - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case at: - rev, err = p.parseAt() - case tilde: - rev, err = p.parseTilde() - case caret: - rev, err = p.parseCaret() - case colon: - rev, err = p.parseColon() - case eof: - err = p.validateFullRevision(&revs) - - if err != nil { - return []Revisioner{}, err - } - - return revs, nil - default: - p.unscan() - rev, err = p.parseRef() - } - - if err != nil { - return []Revisioner{}, err - } - - revs = append(revs, rev) - } -} - -// validateFullRevision ensures all revisioner chunks make a valid revision -func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { - var hasReference bool - - for i, chunk := range *chunks { - switch chunk.(type) { - case Ref: - if i == 0 { - hasReference = true - } else { - return &ErrInvalidRevision{`reference must be defined once at the beginning`} - } - case AtDate: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{<date>}, <refname>@{<date>}`} - case AtReflog: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{<n>}, <refname>@{<n>}`} - case AtCheckout: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`} - case AtUpstream: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, <refname>@{upstream}, @{u}, <refname>@{u}`} - case AtPush: - if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { - return nil - } - - return &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, <refname>@{push}`} - case TildePath, CaretPath, CaretReg: - if !hasReference { - return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} - } - case ColonReg: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`} - case ColonPath: - if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`} - case ColonStagePath: - if len(*chunks) == 1 { - return nil - } - - return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`} - } - } - - return nil -} - -// parseAt extract @ statements -func (p *Parser) parseAt() (Revisioner, error) { - var tok, nextTok token - var lit, nextLit string - var err error - - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - if tok != obrace { - p.unscan() - - return Ref("HEAD"), nil - } - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, nextLit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: - return AtUpstream{}, nil - case tok == word && lit == "push" && nextTok == cbrace: - return AtPush{}, nil - case tok == number && nextTok == cbrace: - n, _ := strconv.Atoi(lit) - - return AtReflog{n}, nil - case tok == minus && nextTok == number: - n, _ := strconv.Atoi(nextLit) - - t, _, err := p.scan() - - if err != nil { - return nil, err - } - - if t
!= cbrace { - return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`} - } - - return AtCheckout{n}, nil - default: - p.unscan() - - date := lit - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == cbrace: - t, err := time.Parse("2006-01-02T15:04:05Z", date) - - if err != nil { - return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} - } - - return AtDate{t}, nil - default: - date += lit - } - } - } -} - -// parseTilde extract ~ statements -func (p *Parser) parseTilde() (Revisioner, error) { - var tok token - var lit string - var err error - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == number: - n, _ := strconv.Atoi(lit) - - return TildePath{n}, nil - default: - p.unscan() - return TildePath{1}, nil - } -} - -// parseCaret extract ^ statements -func (p *Parser) parseCaret() (Revisioner, error) { - var tok token - var lit string - var err error - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == obrace: - r, err := p.parseCaretBraces() - - if err != nil { - return nil, err - } - - return r, nil - case tok == number: - n, _ := strconv.Atoi(lit) - - if n > 2 { - return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} - } - - return CaretPath{n}, nil - default: - p.unscan() - return CaretPath{1}, nil - } -} - -// parseCaretBraces extract ^{} statements -func (p *Parser) parseCaretBraces() (Revisioner, error) { - var tok, nextTok token - var lit, _ string - start := true - var re string - var negate bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): - return CaretType{lit}, nil - case re == "" && tok == cbrace: - return CaretType{"tag"}, nil - case re == "" && tok == emark && nextTok == emark: - re += lit - case re == "" && tok == emark && nextTok == minus: - negate = true - case re == "" && tok == emark: - return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`} - case re == "" && tok == slash: - p.unscan() - case tok != slash && start: - return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} - case tok != cbrace: - p.unscan() - re += lit - case tok == cbrace: - p.unscan() - - reg, err := regexp.Compile(re) - - if err != nil { - return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} - } - - return CaretReg{reg, negate}, nil - } - - start = false - } -} - -// parseColon extract : statements -func (p *Parser) parseColon() (Revisioner, error) { - var tok token - var err error - - tok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case slash: - return p.parseColonSlash() - default: - p.unscan() - return p.parseColonDefault() - } -} - -// parseColonSlash extract :/ statements -func (p *Parser) parseColonSlash() (Revisioner, error) { - var tok, nextTok token - var lit string - var re string - var negate bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == emark && nextTok == emark: - re += lit - case re == "" && tok == emark && nextTok == minus: - negate = true - case re == "" && tok == emark: - return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`} - case tok == eof: - p.unscan() - reg, err := regexp.Compile(re) - - if err != nil { - return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} - } - - return ColonReg{reg, negate}, nil - default: - p.unscan() - re += lit - } - } -} - -// parseColonDefault extract : statements -func (p *Parser) parseColonDefault() (Revisioner, error) { - var tok token - var lit string - var path string - var stage int - var err error - var n = -1 - - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - nextTok, _, err := p.scan() - - if err != nil { - return nil, err - } - - if tok == number && nextTok == colon { - n, _ = strconv.Atoi(lit) - } - - switch n { - case 0, 1, 2, 3: - stage = n - default: - path += lit - p.unscan() - } - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch { - case tok == eof && n == -1: - return ColonPath{path}, nil - case tok == eof: - return ColonStagePath{path, stage}, nil - default: - path += lit - } - } -} - -// parseRef extract reference name -func (p *Parser) parseRef() (Revisioner, error) { - var tok, prevTok token - var lit, buf string - var endOfRef bool - var err error - - for { - tok, lit, err = p.scan() - - if err != nil { - return nil, err - } - - switch tok { - case eof, at, colon, tilde, caret: - endOfRef = true - } - - err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) - - if err != nil { - return "", err - } - - if endOfRef { - p.unscan() - return Ref(buf), nil - } - - buf += lit - prevTok = tok - } -} - -// checkRefFormat ensure reference name follow rules defined here : -// https://git-scm.com/docs/git-check-ref-format -func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { - switch token { - case aslash, space, control, qmark, asterisk, obracket: - return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} - } - - switch { - case (token == dot || token == slash) && buffer == "": - return 
&ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} - case previousToken == slash && endOfRef: - return &ErrInvalidRevision{`must not end with "/"`} - case previousToken == dot && endOfRef: - return &ErrInvalidRevision{`must not end with "."`} - case token == dot && previousToken == slash: - return &ErrInvalidRevision{`must not contains "/."`} - case previousToken == dot && token == dot: - return &ErrInvalidRevision{`must not contains ".."`} - case previousToken == slash && token == slash: - return &ErrInvalidRevision{`must not contains consecutively "/"`} - case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": - return &ErrInvalidRevision{"cannot end with .lock"} - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go deleted file mode 100644 index c46c21b7959..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go +++ /dev/null @@ -1,117 +0,0 @@ -package revision - -import ( - "bufio" - "io" - "unicode" -) - -// runeCategoryValidator takes a rune as input and -// validates it belongs to a rune category -type runeCategoryValidator func(r rune) bool - -// tokenizeExpression aggregates a series of runes matching check predicate into a single -// string and provides given tokenType as token type -func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { - var data []rune - data = append(data, ch) - - for { - c, _, err := r.ReadRune() - - if c == zeroRune { - break - } - - if err != nil { - return tokenError, "", err - } - - if check(c) { - data = append(data, c) - } else { - err := r.UnreadRune() - - if err != nil { - return tokenError, "", err - } - - return tokenType, string(data), nil - } - } - - return tokenType, string(data), nil -} - -var zeroRune = rune(0) - -// scanner represents a lexical scanner. -type scanner struct { - r *bufio.Reader -} - -// newScanner returns a new instance of scanner. 
-func newScanner(r io.Reader) *scanner { - return &scanner{r: bufio.NewReader(r)} -} - -// Scan extracts tokens and their strings counterpart -// from the reader -func (s *scanner) scan() (token, string, error) { - ch, _, err := s.r.ReadRune() - - if err != nil && err != io.EOF { - return tokenError, "", err - } - - switch ch { - case zeroRune: - return eof, "", nil - case ':': - return colon, string(ch), nil - case '~': - return tilde, string(ch), nil - case '^': - return caret, string(ch), nil - case '.': - return dot, string(ch), nil - case '/': - return slash, string(ch), nil - case '{': - return obrace, string(ch), nil - case '}': - return cbrace, string(ch), nil - case '-': - return minus, string(ch), nil - case '@': - return at, string(ch), nil - case '\\': - return aslash, string(ch), nil - case '?': - return qmark, string(ch), nil - case '*': - return asterisk, string(ch), nil - case '[': - return obracket, string(ch), nil - case '!': - return emark, string(ch), nil - } - - if unicode.IsSpace(ch) { - return space, string(ch), nil - } - - if unicode.IsControl(ch) { - return control, string(ch), nil - } - - if unicode.IsLetter(ch) { - return tokenizeExpression(ch, word, unicode.IsLetter, s.r) - } - - if unicode.IsNumber(ch) { - return tokenizeExpression(ch, number, unicode.IsNumber, s.r) - } - - return tokenError, string(ch), nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/token.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/token.go deleted file mode 100644 index abc40488693..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/revision/token.go +++ /dev/null @@ -1,28 +0,0 @@ -package revision - -// token represents an entity extracted from string parsing -type token int - -const ( - eof token = iota - - aslash - asterisk - at - caret - cbrace - colon - control - dot - emark - minus - number - obrace - obracket - qmark - slash - space - tilde - tokenError - word -) diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/url/url.go deleted file mode 100644 index 14cf133de8f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/internal/url/url.go +++ /dev/null @@ -1,37 +0,0 @@ -package url - -import ( - "regexp" -) - -var ( - isSchemeRegExp = regexp.MustCompile(`^[^:]+://`) - scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`) -) - -// MatchesScheme returns true if the given string matches a URL-like -// format scheme. -func MatchesScheme(url string) bool { - return isSchemeRegExp.MatchString(url) -} - -// MatchesScpLike returns true if the given string matches an SCP-like -// format scheme. -func MatchesScpLike(url string) bool { - return scpLikeUrlRegExp.MatchString(url) -} - -// FindScpLikeComponents returns the user, host, port and path of the -// given SCP-like URL. -func FindScpLikeComponents(url string) (user, host, port, path string) { - m := scpLikeUrlRegExp.FindStringSubmatch(url) - return m[1], m[2], m[3], m[4] -} - -// IsLocalEndpoint returns true if the given URL string specifies a -// local file endpoint. For example, on a Linux machine, -// `/home/user/src/go-git` would match as a local endpoint, but -// `https://github.com/src-d/go-git` would not.
-func IsLocalEndpoint(url string) bool { - return !MatchesScheme(url) && !MatchesScpLike(url) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/object_walker.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/object_walker.go deleted file mode 100644 index 3fcdd2999cd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/object_walker.go +++ /dev/null @@ -1,104 +0,0 @@ -package git - -import ( - "fmt" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage" -) - -type objectWalker struct { - Storer storage.Storer - // seen is the set of objects seen in the repo. - // seen map can become huge if walking over large - // repos. Thus using struct{} as the value type. - seen map[plumbing.Hash]struct{} -} - -func newObjectWalker(s storage.Storer) *objectWalker { - return &objectWalker{s, map[plumbing.Hash]struct{}{}} -} - -// walkAllRefs walks all (hash) references from the repo. -func (p *objectWalker) walkAllRefs() error { - // Walk over all the references in the repo. - it, err := p.Storer.IterReferences() - if err != nil { - return err - } - defer it.Close() - err = it.ForEach(func(ref *plumbing.Reference) error { - // Exit this iteration early for non-hash references. - if ref.Type() != plumbing.HashReference { - return nil - } - return p.walkObjectTree(ref.Hash()) - }) - return err -} - -func (p *objectWalker) isSeen(hash plumbing.Hash) bool { - _, seen := p.seen[hash] - return seen -} - -func (p *objectWalker) add(hash plumbing.Hash) { - p.seen[hash] = struct{}{} -} - -// walkObjectTree walks over all objects and remembers references -// to them in the objectWalker. This is used instead of the revlist -// walks because memory usage is tight with huge repos. -func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { - // Check if we have already seen, and mark this object - if p.isSeen(hash) { - return nil - } - p.add(hash) - // Fetch the object. - obj, err := object.GetObject(p.Storer, hash) - if err != nil { - return fmt.Errorf("Getting object %s failed: %v", hash, err) - } - // Walk all children depending on object type. - switch obj := obj.(type) { - case *object.Commit: - err = p.walkObjectTree(obj.TreeHash) - if err != nil { - return err - } - for _, h := range obj.ParentHashes { - err = p.walkObjectTree(h) - if err != nil { - return err - } - } - case *object.Tree: - for i := range obj.Entries { - // Shortcut for blob objects: - // 'or' the lower bits of a mode and check that - // it matches a filemode.Executable. The type information - // is in the higher bits, but this is the cleanest way - // to handle plain files with different modes. - // Other non-tree objects are somewhat rare, so they - // are not special-cased. - if obj.Entries[i].Mode|0755 == filemode.Executable { - p.add(obj.Entries[i].Hash) - continue - } - // Normal walk for sub-trees (and symlinks etc). - err = p.walkObjectTree(obj.Entries[i].Hash) - if err != nil { - return err - } - } - case *object.Tag: - return p.walkObjectTree(obj.Target) - default: - // Error out on unhandled object types.
- return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/options.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/options.go deleted file mode 100644 index 70687965bbd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/options.go +++ /dev/null @@ -1,634 +0,0 @@ -package git - -import ( - "errors" - "fmt" - "regexp" - "strings" - "time" - - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" -) - -// SubmoduleRescursivity defines how depth will affect any submodule recursive -// operation. -type SubmoduleRescursivity uint - -const ( - // DefaultRemoteName name of the default Remote, just like git command. - DefaultRemoteName = "origin" - - // NoRecurseSubmodules disables the recursion for a submodule operation. - NoRecurseSubmodules SubmoduleRescursivity = 0 - // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. - DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 -) - -var ( - ErrMissingURL = errors.New("URL field is required") -) - -// CloneOptions describes how a clone should be performed. -type CloneOptions struct { - // The (possibly remote) repository URL to clone from. - URL string - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Name of the remote to be added, by default `origin`. - RemoteName string - // Remote branch to clone. - ReferenceName plumbing.ReferenceName - // Fetch only ReferenceName if true. - SingleBranch bool - // No checkout of HEAD after clone if true. - NoCheckout bool - // Limit fetching to the specified number of commits. - Depth int - // RecurseSubmodules after the clone is created, initialize all submodules - // within, using their default settings. This option is ignored if the - // cloned repository does not have a worktree. - RecurseSubmodules SubmoduleRescursivity - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Tags describe how the tags will be fetched from the remote repository, - // by default is AllTags. - Tags TagMode - // InsecureSkipTLS skips ssl verify if protocol is https - InsecureSkipTLS bool - // CABundle specify additional ca bundle with system cert pool - CABundle []byte -} - -// Validate validates the fields and sets the default values. -func (o *CloneOptions) Validate() error { - if o.URL == "" { - return ErrMissingURL - } - - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.ReferenceName == "" { - o.ReferenceName = plumbing.HEAD - } - - if o.Tags == InvalidTagMode { - o.Tags = AllTags - } - - return nil -} - -// PullOptions describes how a pull should be performed. -type PullOptions struct { - // Name of the remote to be pulled. If empty, uses the default. - RemoteName string - // Remote branch to clone. If empty, uses HEAD. - ReferenceName plumbing.ReferenceName - // Fetch only ReferenceName if true. - SingleBranch bool - // Limit fetching to the specified number of commits. - Depth int - // Auth credentials, if required, to use with the remote repository. 
- Auth transport.AuthMethod - // RecurseSubmodules controls if new commits of all populated submodules - // should be fetched too. - RecurseSubmodules SubmoduleRescursivity - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Force allows the pull to update a local branch even when the remote - // branch does not descend from it. - Force bool - // InsecureSkipTLS skips ssl verify if protocol is https - InsecureSkipTLS bool - // CABundle specify additional ca bundle with system cert pool - CABundle []byte -} - -// Validate validates the fields and sets the default values. -func (o *PullOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.ReferenceName == "" { - o.ReferenceName = plumbing.HEAD - } - - return nil -} - -type TagMode int - -const ( - InvalidTagMode TagMode = iota - // TagFollowing any tag that points into the histories being fetched is also - // fetched. TagFollowing requires a server with `include-tag` capability - // in order to fetch the annotated tags objects. - TagFollowing - // AllTags fetch all tags from the remote (i.e., fetch remote tags - // refs/tags/* into local tags with the same name) - AllTags - //NoTags fetch no tags from the remote at all - NoTags -) - -// FetchOptions describes how a fetch should be performed -type FetchOptions struct { - // Name of the remote to fetch from. Defaults to origin. - RemoteName string - RefSpecs []config.RefSpec - // Depth limit fetching to the specified number of commits from the tip of - // each remote branch history. - Depth int - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored and the capability (if supported) - // no-progress, is sent to the server to avoid send this information. - Progress sideband.Progress - // Tags describe how the tags will be fetched from the remote repository, - // by default is TagFollowing. - Tags TagMode - // Force allows the fetch to update a local branch even when the remote - // branch does not descend from it. - Force bool - // InsecureSkipTLS skips ssl verify if protocol is https - InsecureSkipTLS bool - // CABundle specify additional ca bundle with system cert pool - CABundle []byte -} - -// Validate validates the fields and sets the default values. -func (o *FetchOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if o.Tags == InvalidTagMode { - o.Tags = TagFollowing - } - - for _, r := range o.RefSpecs { - if err := r.Validate(); err != nil { - return err - } - } - - return nil -} - -// PushOptions describes how a push should be performed. -type PushOptions struct { - // RemoteName is the name of the remote to be pushed to. - RemoteName string - // RefSpecs specify what destination ref to update with what source - // object. A refspec with empty src can be used to delete a reference. - RefSpecs []config.RefSpec - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // Progress is where the human readable information sent by the server is - // stored, if nil nothing is stored. 
- Progress sideband.Progress - // Prune specifies that remote refs that match given RefSpecs and that do - // not exist locally will be removed. - Prune bool - // Force allows the push to update a remote branch even when the local - // branch does not descend from it. - Force bool - // InsecureSkipTLS skips ssl verify if protocol is https - InsecureSkipTLS bool - // CABundle specify additional ca bundle with system cert pool - CABundle []byte - // RequireRemoteRefs only allows a remote ref to be updated if its current - // value is the one specified here. - RequireRemoteRefs []config.RefSpec -} - -// Validate validates the fields and sets the default values. -func (o *PushOptions) Validate() error { - if o.RemoteName == "" { - o.RemoteName = DefaultRemoteName - } - - if len(o.RefSpecs) == 0 { - o.RefSpecs = []config.RefSpec{ - config.RefSpec(config.DefaultPushRefSpec), - } - } - - for _, r := range o.RefSpecs { - if err := r.Validate(); err != nil { - return err - } - } - - return nil -} - -// SubmoduleUpdateOptions describes how a submodule update should be performed. -type SubmoduleUpdateOptions struct { - // Init, if true initializes the submodules recorded in the index. - Init bool - // NoFetch tell to the update command to not fetch new objects from the - // remote site. - NoFetch bool - // RecurseSubmodules the update is performed not only in the submodules of - // the current repository but also in any nested submodules inside those - // submodules (and so on). Until the SubmoduleRescursivity is reached. - RecurseSubmodules SubmoduleRescursivity - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod -} - -var ( - ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") - ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") -) - -// CheckoutOptions describes how a checkout operation should be performed. -type CheckoutOptions struct { - // Hash is the hash of the commit to be checked out. If used, HEAD will be - // in detached mode. If Create is not used, Branch and Hash are mutually - // exclusive. - Hash plumbing.Hash - // Branch to be checked out, if Branch and Hash are empty is set to `master`. - Branch plumbing.ReferenceName - // Create a new branch named Branch and start it at Hash. - Create bool - // Force, if true when switching branches, proceed even if the index or the - // working tree differs from HEAD. This is used to throw away local changes - Force bool - // Keep, if true when switching branches, local changes (the index or the - // working tree changes) will be kept so that they can be committed to the - // target branch. Force and Keep are mutually exclusive, should not be both - // set to true. - Keep bool -} - -// Validate validates the fields and sets the default values. -func (o *CheckoutOptions) Validate() error { - if !o.Create && !o.Hash.IsZero() && o.Branch != "" { - return ErrBranchHashExclusive - } - - if o.Create && o.Branch == "" { - return ErrCreateRequiresBranch - } - - if o.Branch == "" { - o.Branch = plumbing.Master - } - - return nil -} - -// ResetMode defines the mode of a reset operation. -type ResetMode int8 - -const ( - // MixedReset resets the index but not the working tree (i.e., the changed - // files are preserved but not marked for commit) and reports what has not - // been updated. This is the default action. - MixedReset ResetMode = iota - // HardReset resets the index and working tree.
Any changes to tracked files - in the working tree are discarded. - HardReset - // MergeReset resets the index and updates the files in the working tree - // that are different between Commit and HEAD, but keeps those which are - // different between the index and working tree (i.e. which have changes - // which have not been added). - // - // If a file that is different between Commit and the index has unstaged - // changes, reset is aborted. - MergeReset - // SoftReset does not touch the index file or the working tree at all (but - // resets the head to <commit>, just like all modes do). This leaves all - // your changed files "Changes to be committed", as git status would put it. - SoftReset -) - -// ResetOptions describes how a reset operation should be performed. -type ResetOptions struct { - // Commit, if commit is present set the current branch head (HEAD) to it. - Commit plumbing.Hash - // Mode, form resets the current branch head to Commit and possibly updates - // the index (resetting it to the tree of Commit) and the working tree - // depending on Mode. If empty MixedReset is used. - Mode ResetMode -} - -// Validate validates the fields and sets the default values. -func (o *ResetOptions) Validate(r *Repository) error { - if o.Commit == plumbing.ZeroHash { - ref, err := r.Head() - if err != nil { - return err - } - - o.Commit = ref.Hash() - } - - return nil -} - -type LogOrder int8 - -const ( - LogOrderDefault LogOrder = iota - LogOrderDFS - LogOrderDFSPost - LogOrderBSF - LogOrderCommitterTime -) - -// LogOptions describes how a log action should be performed. -type LogOptions struct { - // When the From option is set the log will only contain commits - // reachable from it. If this option is not set, HEAD will be used as - // the default From. - From plumbing.Hash - - // The default traversal algorithm is Depth-first search - // set Order=LogOrderCommitterTime for ordering by committer time (more compatible with `git log`) - // set Order=LogOrderBSF for Breadth-first search - Order LogOrder - - // Show only those commits in which the specified file was inserted/updated. - // It is equivalent to running `git log -- <file-name>`. - // this field is kept for compatibility, it can be replaced with PathFilter - FileName *string - - // Filter commits based on the path of files that are updated - // takes file path as argument and should return true if the file is desired - // It can be used to implement `git log -- <path>` - // either <path> is a file path, or directory path, or a regexp of file/directory path - PathFilter func(string) bool - - // Pretend as if all the refs in refs/, along with HEAD, are listed on the command line as <commit>. - // It is equivalent to running `git log --all`. - // If set to true, the From option will be ignored. - All bool - - // Show commits more recent than a specific date. - // It is equivalent to running `git log --since <date>` or `git log --after <date>`. - Since *time.Time - - // Show commits older than a specific date. - // It is equivalent to running `git log --until <date>` or `git log --before <date>`. - Until *time.Time -} - -var ( - ErrMissingAuthor = errors.New("author field is required") -) - -// AddOptions describes how an `add` operation should be performed -type AddOptions struct { - // All equivalent to `git add -A`, update the index not only where the - // working tree has a file matching `Path` but also where the index already - // has an entry. This adds, modifies, and removes index entries to match the - // working tree.
If no `Path` nor `Glob` is given when `All` option is - used, all files in the entire working tree are updated. - All bool - // Path is the exact filepath to the file or directory to be added. - Path string - // Glob adds all paths, matching pattern, to the index. If pattern matches a - // directory path, all directory contents are added to the index recursively. - Glob string -} - -// Validate validates the fields and sets the default values. -func (o *AddOptions) Validate(r *Repository) error { - if o.Path != "" && o.Glob != "" { - return fmt.Errorf("fields Path and Glob are mutually exclusive") - } - - return nil -} - -// CommitOptions describes how a commit operation should be performed. -type CommitOptions struct { - // All automatically stage files that have been modified and deleted, but - // new files you have not told Git about are not affected. - All bool - // Author is the author's signature of the commit. If Author is empty the - // Name and Email are read from the config, and time.Now is used as When. - Author *object.Signature - // Committer is the committer's signature of the commit. If Committer is - // nil the Author signature is used. - Committer *object.Signature - // Parents are the parents commits for the new commit, by default when - // len(Parents) is zero, the hash of HEAD reference is used. - Parents []plumbing.Hash - // SignKey denotes a key to sign the commit with. A nil value here means the - // commit will not be signed. The private key must be present and already - // decrypted. - SignKey *openpgp.Entity -} - -// Validate validates the fields and sets the default values. -func (o *CommitOptions) Validate(r *Repository) error { - if o.Author == nil { - if err := o.loadConfigAuthorAndCommitter(r); err != nil { - return err - } - } - - if o.Committer == nil { - o.Committer = o.Author - } - - if len(o.Parents) == 0 { - head, err := r.Head() - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - if head != nil { - o.Parents = []plumbing.Hash{head.Hash()} - } - } - - return nil -} - -func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error { - cfg, err := r.ConfigScoped(config.SystemScope) - if err != nil { - return err - } - - if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" { - o.Author = &object.Signature{ - Name: cfg.Author.Name, - Email: cfg.Author.Email, - When: time.Now(), - } - } - - if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" { - o.Committer = &object.Signature{ - Name: cfg.Committer.Name, - Email: cfg.Committer.Email, - When: time.Now(), - } - } - - if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" { - o.Author = &object.Signature{ - Name: cfg.User.Name, - Email: cfg.User.Email, - When: time.Now(), - } - } - - if o.Author == nil { - return ErrMissingAuthor - } - - return nil -} - -var ( - ErrMissingName = errors.New("name field is required") - ErrMissingTagger = errors.New("tagger field is required") - ErrMissingMessage = errors.New("message field is required") -) - -// CreateTagOptions describes how a tag object should be created. -type CreateTagOptions struct { - // Tagger defines the signature of the tag creator. If Tagger is empty the - // Name and Email are read from the config, and time.Now is used as When. - Tagger *object.Signature - // Message defines the annotation of the tag. It is canonicalized during - // validation into the format expected by git - no leading whitespace and - // ending in a newline.
- Message string - // SignKey denotes a key to sign the tag with. A nil value here means the tag - // will not be signed. The private key must be present and already decrypted. - SignKey *openpgp.Entity -} - -// Validate validates the fields and sets the default values. -func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error { - if o.Tagger == nil { - if err := o.loadConfigTagger(r); err != nil { - return err - } - } - - if o.Message == "" { - return ErrMissingMessage - } - - // Canonicalize the message into the expected message format. - o.Message = strings.TrimSpace(o.Message) + "\n" - - return nil -} - -func (o *CreateTagOptions) loadConfigTagger(r *Repository) error { - cfg, err := r.ConfigScoped(config.SystemScope) - if err != nil { - return err - } - - if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" { - o.Tagger = &object.Signature{ - Name: cfg.Author.Name, - Email: cfg.Author.Email, - When: time.Now(), - } - } - - if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" { - o.Tagger = &object.Signature{ - Name: cfg.User.Name, - Email: cfg.User.Email, - When: time.Now(), - } - } - - if o.Tagger == nil { - return ErrMissingTagger - } - - return nil -} - -// ListOptions describes how a remote list should be performed. -type ListOptions struct { - // Auth credentials, if required, to use with the remote repository. - Auth transport.AuthMethod - // InsecureSkipTLS skips ssl verify if protocol is https - InsecureSkipTLS bool - // CABundle specify additional ca bundle with system cert pool - CABundle []byte -} - -// CleanOptions describes how a clean should be performed. -type CleanOptions struct { - Dir bool -} - -// GrepOptions describes how a grep should be performed. -type GrepOptions struct { - // Patterns are compiled Regexp objects to be matched. - Patterns []*regexp.Regexp - // InvertMatch selects non-matching lines. - InvertMatch bool - // CommitHash is the hash of the commit from which worktree should be derived. - CommitHash plumbing.Hash - // ReferenceName is the branch or tag name from which worktree should be derived. - ReferenceName plumbing.ReferenceName - // PathSpecs are compiled Regexp objects of pathspec to use in the matching. - PathSpecs []*regexp.Regexp -} - -var ( - ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed") -) - -// Validate validates the fields and sets the default values. -func (o *GrepOptions) Validate(w *Worktree) error { - if !o.CommitHash.IsZero() && o.ReferenceName != "" { - return ErrHashOrReference - } - - // If none of CommitHash and ReferenceName are provided, set commit hash of - // the repository's head. - if o.CommitHash.IsZero() && o.ReferenceName == "" { - ref, err := w.r.Head() - if err != nil { - return err - } - o.CommitHash = ref.Hash() - } - - return nil -} - -// PlainOpenOptions describes how opening a plain repository should be -// performed. -type PlainOpenOptions struct { - // DetectDotGit defines whether parent directories should be - // walked until a .git directory or file is found. - DetectDotGit bool - // Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt). - // NOTE: This option will only work with the filesystem storage. - EnableDotGitCommonDir bool -} - -// Validate validates the fields and sets the default values.
-func (o *PlainOpenOptions) Validate() error { return nil } diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go deleted file mode 100644 index acaf1952033..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go +++ /dev/null @@ -1,98 +0,0 @@ -package cache - -import ( - "container/list" - "sync" -) - -// BufferLRU implements an object cache with an LRU eviction policy and a -// maximum size (measured in object size). -type BufferLRU struct { - MaxSize FileSize - - actualSize FileSize - ll *list.List - cache map[int64]*list.Element - mut sync.Mutex -} - -// NewBufferLRU creates a new BufferLRU with the given maximum size. The maximum -// size will never be exceeded. -func NewBufferLRU(maxSize FileSize) *BufferLRU { - return &BufferLRU{MaxSize: maxSize} -} - -// NewBufferLRUDefault creates a new BufferLRU with the default cache size. -func NewBufferLRUDefault() *BufferLRU { - return &BufferLRU{MaxSize: DefaultMaxSize} -} - -type buffer struct { - Key int64 - Slice []byte -} - -// Put puts a buffer into the cache. If the buffer is already in the cache, it -// will be marked as used. Otherwise, it will be inserted. A buffers might -// be evicted to make room for the new one. -func (c *BufferLRU) Put(key int64, slice []byte) { - c.mut.Lock() - defer c.mut.Unlock() - - if c.cache == nil { - c.actualSize = 0 - c.cache = make(map[int64]*list.Element, 1000) - c.ll = list.New() - } - - bufSize := FileSize(len(slice)) - if ee, ok := c.cache[key]; ok { - oldBuf := ee.Value.(buffer) - // in this case bufSize is a delta: new size - old size - bufSize -= FileSize(len(oldBuf.Slice)) - c.ll.MoveToFront(ee) - ee.Value = buffer{key, slice} - } else { - if bufSize > c.MaxSize { - return - } - ee := c.ll.PushFront(buffer{key, slice}) - c.cache[key] = ee - } - - c.actualSize += bufSize - for c.actualSize > c.MaxSize { - last := c.ll.Back() - lastObj := last.Value.(buffer) - lastSize := FileSize(len(lastObj.Slice)) - - c.ll.Remove(last) - delete(c.cache, lastObj.Key) - c.actualSize -= lastSize - } -} - -// Get returns a buffer by its key. It marks the buffer as used. If the buffer -// is not in the cache, (nil, false) will be returned. -func (c *BufferLRU) Get(key int64) ([]byte, bool) { - c.mut.Lock() - defer c.mut.Unlock() - - ee, ok := c.cache[key] - if !ok { - return nil, false - } - - c.ll.MoveToFront(ee) - return ee.Value.(buffer).Slice, true -} - -// Clear the content of this buffer cache. -func (c *BufferLRU) Clear() { - c.mut.Lock() - defer c.mut.Unlock() - - c.ll = nil - c.cache = nil - c.actualSize = 0 -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go deleted file mode 100644 index 7b0d0c76bb3..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go +++ /dev/null @@ -1,39 +0,0 @@ -package cache - -import "github.com/go-git/go-git/v5/plumbing" - -const ( - Byte FileSize = 1 << (iota * 10) - KiByte - MiByte - GiByte -) - -type FileSize int64 - -const DefaultMaxSize FileSize = 96 * MiByte - -// Object is an interface to a object cache. -type Object interface { - // Put puts the given object into the cache. Whether this object will - // actually be put into the cache or not is implementation specific. - Put(o plumbing.EncodedObject) - // Get gets an object from the cache given its hash. 
The second return value - // is true if the object was returned, and false otherwise. - Get(k plumbing.Hash) (plumbing.EncodedObject, bool) - // Clear clears every object from the cache. - Clear() -} - -// Buffer is an interface to a buffer cache. -type Buffer interface { - // Put puts a buffer into the cache. If the buffer is already in the cache, - // it will be marked as used. Otherwise, it will be inserted. Buffer might - // be evicted to make room for the new one. - Put(key int64, slice []byte) - // Get returns a buffer by its key. It marks the buffer as used. If the - // buffer is not in the cache, (nil, false) will be returned. - Get(key int64) ([]byte, bool) - // Clear clears every object from the cache. - Clear() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go deleted file mode 100644 index c50d0d1e6c5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache - -import ( - "container/list" - "sync" - - "github.com/go-git/go-git/v5/plumbing" -) - -// ObjectLRU implements an object cache with an LRU eviction policy and a -// maximum size (measured in object size). -type ObjectLRU struct { - MaxSize FileSize - - actualSize FileSize - ll *list.List - cache map[interface{}]*list.Element - mut sync.Mutex -} - -// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum -// size will never be exceeded. -func NewObjectLRU(maxSize FileSize) *ObjectLRU { - return &ObjectLRU{MaxSize: maxSize} -} - -// NewObjectLRUDefault creates a new ObjectLRU with the default cache size. -func NewObjectLRUDefault() *ObjectLRU { - return &ObjectLRU{MaxSize: DefaultMaxSize} -} - -// Put puts an object into the cache. If the object is already in the cache, it -// will be marked as used. Otherwise, it will be inserted. A single object might -// be evicted to make room for the new object. -func (c *ObjectLRU) Put(obj plumbing.EncodedObject) { - c.mut.Lock() - defer c.mut.Unlock() - - if c.cache == nil { - c.actualSize = 0 - c.cache = make(map[interface{}]*list.Element, 1000) - c.ll = list.New() - } - - objSize := FileSize(obj.Size()) - key := obj.Hash() - if ee, ok := c.cache[key]; ok { - oldObj := ee.Value.(plumbing.EncodedObject) - // in this case objSize is a delta: new size - old size - objSize -= FileSize(oldObj.Size()) - c.ll.MoveToFront(ee) - ee.Value = obj - } else { - if objSize > c.MaxSize { - return - } - ee := c.ll.PushFront(obj) - c.cache[key] = ee - } - - c.actualSize += objSize - for c.actualSize > c.MaxSize { - last := c.ll.Back() - if last == nil { - c.actualSize = 0 - break - } - - lastObj := last.Value.(plumbing.EncodedObject) - lastSize := FileSize(lastObj.Size()) - - c.ll.Remove(last) - delete(c.cache, lastObj.Hash()) - c.actualSize -= lastSize - } -} - -// Get returns an object by its hash. It marks the object as used. If the object -// is not in the cache, (nil, false) will be returned. -func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) { - c.mut.Lock() - defer c.mut.Unlock() - - ee, ok := c.cache[k] - if !ok { - return nil, false - } - - c.ll.MoveToFront(ee) - return ee.Value.(plumbing.EncodedObject), true -} - -// Clear the content of this object cache. 
-func (c *ObjectLRU) Clear() { - c.mut.Lock() - defer c.mut.Unlock() - - c.ll = nil - c.cache = nil - c.actualSize = 0 -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go deleted file mode 100644 index 2cd74bdc1a8..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go +++ /dev/null @@ -1,38 +0,0 @@ -package color - -// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct -// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c - -// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53. -const ( - Normal = "" - Reset = "\033[m" - Bold = "\033[1m" - Red = "\033[31m" - Green = "\033[32m" - Yellow = "\033[33m" - Blue = "\033[34m" - Magenta = "\033[35m" - Cyan = "\033[36m" - BoldRed = "\033[1;31m" - BoldGreen = "\033[1;32m" - BoldYellow = "\033[1;33m" - BoldBlue = "\033[1;34m" - BoldMagenta = "\033[1;35m" - BoldCyan = "\033[1;36m" - FaintRed = "\033[2;31m" - FaintGreen = "\033[2;32m" - FaintYellow = "\033[2;33m" - FaintBlue = "\033[2;34m" - FaintMagenta = "\033[2;35m" - FaintCyan = "\033[2;36m" - BgRed = "\033[41m" - BgGreen = "\033[42m" - BgYellow = "\033[43m" - BgBlue = "\033[44m" - BgMagenta = "\033[45m" - BgCyan = "\033[46m" - Faint = "\033[2m" - FaintItalic = "\033[2;3m" - Reverse = "\033[7m" -) diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/error.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/error.go deleted file mode 100644 index a3ebed3f6c2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/error.go +++ /dev/null @@ -1,35 +0,0 @@ -package plumbing - -import "fmt" - -type PermanentError struct { - Err error -} - -func NewPermanentError(err error) *PermanentError { - if err == nil { - return nil - } - - return &PermanentError{Err: err} -} - -func (e *PermanentError) Error() string { - return fmt.Sprintf("permanent client error: %s", e.Err.Error()) -} - -type UnexpectedError struct { - Err error -} - -func NewUnexpectedError(err error) *UnexpectedError { - if err == nil { - return nil - } - - return &UnexpectedError{Err: err} -} - -func (e *UnexpectedError) Error() string { - return fmt.Sprintf("unexpected client error: %s", e.Err.Error()) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go deleted file mode 100644 index b848a979616..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go +++ /dev/null @@ -1,188 +0,0 @@ -package filemode - -import ( - "encoding/binary" - "fmt" - "os" - "strconv" -) - -// A FileMode represents the kind of tree entries used by git. It -// resembles regular file system modes, although FileModes are -// considerably simpler (there are not so many), and there are some, -// like Submodule that has no file system equivalent. -type FileMode uint32 - -const ( - // Empty is used as the FileMode of tree elements when comparing - // trees in the following situations: - // - // - the mode of tree elements before their creation. - the mode of - // tree elements after their deletion. - the mode of unmerged - // elements when checking the index. - // - // Empty has no file system equivalent. As Empty is the zero value - // of FileMode, it is also returned by New and - // NewFromOSFileMode along with an error, when they fail.
- Empty FileMode = 0 - // Dir represents a Directory. - Dir FileMode = 0040000 - // Regular represents non-executable files. Please note this is not - // the same as golang regular files, which include executable files. - Regular FileMode = 0100644 - // Deprecated represents non-executable files with the group writable - // bit set. This mode was supported by the first versions of git, - // but it has been deprecated nowadays. This library uses them - // internally, so you can read old packfiles, but will treat them as - // Regulars when interfacing with the outside world. This is the - // standard git behaviour. - Deprecated FileMode = 0100664 - // Executable represents executable files. - Executable FileMode = 0100755 - // Symlink represents symbolic links to files. - Symlink FileMode = 0120000 - // Submodule represents git submodules. This mode has no file system - // equivalent. - Submodule FileMode = 0160000 -) - -// New takes the octal string representation of a FileMode and returns -// the FileMode and a nil error. If the string can not be parsed to a -// 32 bit unsigned octal number, it returns Empty and the parsing error. -// -// Example: "40000" means Dir, "100644" means Regular. -// -// Please note this function does not check if the returned FileMode -// is valid in git or if it is malformed. For instance, "1" will -// return the malformed FileMode(1) and a nil error. -func New(s string) (FileMode, error) { - n, err := strconv.ParseUint(s, 8, 32) - if err != nil { - return Empty, err - } - - return FileMode(n), nil -} - -// NewFromOSFileMode returns the FileMode used by git to represent -// the provided file system modes and a nil error on success. If the -// file system mode cannot be mapped to any valid git mode (as with -// sockets or named pipes), it will return Empty and an error. -// -// Note that some git modes cannot be generated from os.FileModes, like -// Deprecated and Submodule; while Empty will be returned, along with an -// error, only when the method fails. -func NewFromOSFileMode(m os.FileMode) (FileMode, error) { - if m.IsRegular() { - if isSetTemporary(m) { - return Empty, fmt.Errorf("no equivalent git mode for %s", m) - } - if isSetCharDevice(m) { - return Empty, fmt.Errorf("no equivalent git mode for %s", m) - } - if isSetUserExecutable(m) { - return Executable, nil - } - return Regular, nil - } - - if m.IsDir() { - return Dir, nil - } - - if isSetSymLink(m) { - return Symlink, nil - } - - return Empty, fmt.Errorf("no equivalent git mode for %s", m) -} - -func isSetCharDevice(m os.FileMode) bool { - return m&os.ModeCharDevice != 0 -} - -func isSetTemporary(m os.FileMode) bool { - return m&os.ModeTemporary != 0 -} - -func isSetUserExecutable(m os.FileMode) bool { - return m&0100 != 0 -} - -func isSetSymLink(m os.FileMode) bool { - return m&os.ModeSymlink != 0 -} - -// Bytes return a slice of 4 bytes with the mode in little endian -// encoding. -func (m FileMode) Bytes() []byte { - ret := make([]byte, 4) - binary.LittleEndian.PutUint32(ret, uint32(m)) - return ret -} - -// IsMalformed returns if the FileMode should not appear in a git packfile, -// this is: Empty and any other mode not mentioned as a constant in this -// package. -func (m FileMode) IsMalformed() bool { - return m != Dir && - m != Regular && - m != Deprecated && - m != Executable && - m != Symlink && - m != Submodule -} - -// String returns the FileMode as a string in the standard git format, -// this is, an octal number padded with zeros to 7 digits.
Malformed -// modes are printed in that same format, for easier debugging. -// -// Example: Regular is "0100644", Empty is "0000000". -func (m FileMode) String() string { - return fmt.Sprintf("%07o", uint32(m)) -} - -// IsRegular returns if the FileMode represents that of a regular file, -// this is, either Regular or Deprecated. Please note that Executable -// are not regular even though in the UNIX tradition, they usually are: -// See the IsFile method. -func (m FileMode) IsRegular() bool { - return m == Regular || - m == Deprecated -} - -// IsFile returns if the FileMode represents that of a file, this is, -// Regular, Deprecated, Executable or Link. -func (m FileMode) IsFile() bool { - return m == Regular || - m == Deprecated || - m == Executable || - m == Symlink -} - -// ToOSFileMode returns the os.FileMode to be used when creating file -// system elements with the given git mode and a nil error on success. -// -// When the provided mode cannot be mapped to a valid file system mode -// (e.g. Submodule) it returns os.FileMode(0) and an error. -// -// The returned file mode does not take into account the umask. -func (m FileMode) ToOSFileMode() (os.FileMode, error) { - switch m { - case Dir: - return os.ModePerm | os.ModeDir, nil - case Submodule: - return os.ModePerm | os.ModeDir, nil - case Regular: - return os.FileMode(0644), nil - // Deprecated is no longer allowed: treated as a Regular instead - case Deprecated: - return os.FileMode(0644), nil - case Executable: - return os.FileMode(0755), nil - case Symlink: - return os.ModePerm | os.ModeSymlink, nil - } - - return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go deleted file mode 100644 index 6d689ea1e01..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go +++ /dev/null @@ -1,109 +0,0 @@ -package config - -// New creates a new config instance. -func New() *Config { - return &Config{} -} - -// Config contains all the sections, comments and includes from a config file. -type Config struct { - Comment *Comment - Sections Sections - Includes Includes -} - -// Includes is a list of Includes in a config file. -type Includes []*Include - -// Include is a reference to an included config file. -type Include struct { - Path string - Config *Config -} - -// Comment string without the prefix '#' or ';'. -type Comment string - -const ( - // NoSubsection token is passed to Config.Section and Config.SetSection to - // represent the absence of a section. - NoSubsection = "" -) - -// Section returns a existing section with the given name or creates a new one. -func (c *Config) Section(name string) *Section { - for i := len(c.Sections) - 1; i >= 0; i-- { - s := c.Sections[i] - if s.IsName(name) { - return s - } - } - - s := &Section{Name: name} - c.Sections = append(c.Sections, s) - return s -} - -// HasSection checks if the Config has a section with the specified name. -func (c *Config) HasSection(name string) bool { - for _, s := range c.Sections { - if s.IsName(name) { - return true - } - } - return false -} - -// RemoveSection removes a section from a config file. -func (c *Config) RemoveSection(name string) *Config { - result := Sections{} - for _, s := range c.Sections { - if !s.IsName(name) { - result = append(result, s) - } - } - - c.Sections = result - return c -} - -// RemoveSubsection remove a subsection from a config file. 
-func (c *Config) RemoveSubsection(section string, subsection string) *Config { - for _, s := range c.Sections { - if s.IsName(section) { - result := Subsections{} - for _, ss := range s.Subsections { - if !ss.IsName(subsection) { - result = append(result, ss) - } - } - s.Subsections = result - } - } - - return c -} - -// AddOption adds an option to a given section and subsection. Use the -// NoSubsection constant for the subsection argument if no subsection is wanted. -func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { - if subsection == "" { - c.Section(section).AddOption(key, value) - } else { - c.Section(section).Subsection(subsection).AddOption(key, value) - } - - return c -} - -// SetOption sets an option to a given section and subsection. Use the -// NoSubsection constant for the subsection argument if no subsection is wanted. -func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { - if subsection == "" { - c.Section(section).SetOption(key, value) - } else { - c.Section(section).Subsection(subsection).SetOption(key, value) - } - - return c -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go deleted file mode 100644 index 8e52d57f302..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go +++ /dev/null @@ -1,37 +0,0 @@ -package config - -import ( - "io" - - "github.com/go-git/gcfg" -) - -// A Decoder reads and decodes config files from an input stream. -type Decoder struct { - io.Reader -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{r} -} - -// Decode reads the whole config from its input and stores it in the -// value pointed to by config. -func (d *Decoder) Decode(config *Config) error { - cb := func(s string, ss string, k string, v string, bv bool) error { - if ss == "" && k == "" { - config.Section(s) - return nil - } - - if ss != "" && k == "" { - config.Section(s).Subsection(ss) - return nil - } - - config.AddOption(s, ss, k, v) - return nil - } - return gcfg.ReadWithCallback(d, cb) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go deleted file mode 100644 index 3986c836581..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package config implements encoding and decoding of git config files. -// -// Configuration File -// ------------------ -// -// The Git configuration file contains a number of variables that affect -// the Git commands' behavior. The `.git/config` file in each repository -// is used to store the configuration for that repository, and -// `$HOME/.gitconfig` is used to store a per-user configuration as -// fallback values for the `.git/config` file. The file `/etc/gitconfig` -// can be used to store a system-wide default configuration. -// -// The configuration variables are used by both the Git plumbing -// and the porcelains. The variables are divided into sections, wherein -// the fully qualified variable name of the variable itself is the last -// dot-separated segment and the section name is everything before the last -// dot. 
The variable names are case-insensitive, allow only alphanumeric -// characters and `-`, and must start with an alphabetic character. Some -// variables may appear multiple times; we say then that the variable is -// multivalued. -// -// Syntax -// ~~~~~~ -// -// The syntax is fairly flexible and permissive; whitespaces are mostly -// ignored. The '#' and ';' characters begin comments to the end of line, -// blank lines are ignored. -// -// The file consists of sections and variables. A section begins with -// the name of the section in square brackets and continues until the next -// section begins. Section names are case-insensitive. Only alphanumeric -// characters, `-` and `.` are allowed in section names. Each variable -// must belong to some section, which means that there must be a section -// header before the first setting of a variable. -// -// Sections can be further divided into subsections. To begin a subsection -// put its name in double quotes, separated by space from the section name, -// in the section header, like in the example below: -// -// -------- -// [section "subsection"] -// -// -------- -// -// Subsection names are case sensitive and can contain any characters except -// newline (doublequote `"` and backslash can be included by escaping them -// as `\"` and `\\`, respectively). Section headers cannot span multiple -// lines. Variables may belong directly to a section or to a given subsection. -// You can have `[section]` if you have `[section "subsection"]`, but you -// don't need to. -// -// There is also a deprecated `[section.subsection]` syntax. With this -// syntax, the subsection name is converted to lower-case and is also -// compared case sensitively. These subsection names follow the same -// restrictions as section names. -// -// All the other lines (and the remainder of the line after the section -// header) are recognized as setting variables, in the form -// 'name = value' (or just 'name', which is a short-hand to say that -// the variable is the boolean "true"). -// The variable names are case-insensitive, allow only alphanumeric characters -// and `-`, and must start with an alphabetic character. -// -// A line that defines a value can be continued to the next line by -// ending it with a `\`; the backquote and the end-of-line are -// stripped. Leading whitespaces after 'name =', the remainder of the -// line after the first comment character '#' or ';', and trailing -// whitespaces of the line are discarded unless they are enclosed in -// double quotes. Internal whitespaces within the value are retained -// verbatim. -// -// Inside double quotes, double quote `"` and backslash `\` characters -// must be escaped: use `\"` for `"` and `\\` for `\`. -// -// The following escape sequences (beside `\"` and `\\`) are recognized: -// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) -// and `\b` for backspace (BS). Other char escape sequences (including octal -// escape sequences) are invalid. -// -// Includes -// ~~~~~~~~ -// -// You can include one config file from another by setting the special -// `include.path` variable to the name of the file to be included. The -// variable takes a pathname as its value, and is subject to tilde -// expansion. -// -// The included file is expanded immediately, as if its contents had been -// found at the location of the include directive. 
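The syntax described in this doc comment maps directly onto the package's Decoder and the Section/Subsection accessors. A minimal sketch of round-tripping a config, assuming the upstream go-git module (the diff only deletes the vendored copy):

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	raw := "[core]\n\tfilemode = false\n[branch \"devel\"]\n\tremote = origin\n"

	cfg := config.New()
	if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		panic(err)
	}

	// Lookups follow the [section "subsection"] structure described above.
	fmt.Println(cfg.Section("core").Option("filemode"))                     // false
	fmt.Println(cfg.Section("branch").Subsection("devel").Option("remote")) // origin

	// Encoding writes the same git-config syntax back out.
	_ = config.NewEncoder(os.Stdout).Encode(cfg)
}
```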
If the value of the -// `include.path` variable is a relative path, the path is considered to be -// relative to the configuration file in which the include directive was -// found. See below for examples. -// -// -// Example -// ~~~~~~~ -// -// # Core variables -// [core] -// ; Don't trust file modes -// filemode = false -// -// # Our diff algorithm -// [diff] -// external = /usr/local/bin/diff-wrapper -// renames = true -// -// [branch "devel"] -// remote = origin -// merge = refs/heads/devel -// -// # Proxy settings -// [core] -// gitProxy="ssh" for "kernel.org" -// gitProxy=default-proxy ; for the rest -// -// [include] -// path = /path/to/foo.inc ; include by absolute path -// path = foo ; expand "foo" relative to the current file -// path = ~/foo ; expand "foo" in your `$HOME` directory -// -package config diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go deleted file mode 100644 index 4eac8968adf..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go +++ /dev/null @@ -1,77 +0,0 @@ -package config - -import ( - "fmt" - "io" - "strings" -) - -// An Encoder writes config files to an output stream. -type Encoder struct { - w io.Writer -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{w} -} - -// Encode writes the config in git config format to the stream of the encoder. -func (e *Encoder) Encode(cfg *Config) error { - for _, s := range cfg.Sections { - if err := e.encodeSection(s); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSection(s *Section) error { - if len(s.Options) > 0 { - if err := e.printf("[%s]\n", s.Name); err != nil { - return err - } - - if err := e.encodeOptions(s.Options); err != nil { - return err - } - } - - for _, ss := range s.Subsections { - if err := e.encodeSubsection(s.Name, ss); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { - //TODO: escape - if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { - return err - } - - return e.encodeOptions(s.Options) -} - -func (e *Encoder) encodeOptions(opts Options) error { - for _, o := range opts { - pattern := "\t%s = %s\n" - if strings.Contains(o.Value, "\\") { - pattern = "\t%s = %q\n" - } - - if err := e.printf(pattern, o.Key, o.Value); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) printf(msg string, args ...interface{}) error { - _, err := fmt.Fprintf(e.w, msg, args...) - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go deleted file mode 100644 index cad394810a1..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go +++ /dev/null @@ -1,127 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// Option defines a key/value entity in a config file. -type Option struct { - // Key preserving original caseness. - // Use IsKey instead to compare key regardless of caseness. - Key string - // Original value as string, could be not normalized. - Value string -} - -type Options []*Option - -// IsKey returns true if the given key matches -// this option's key in a case-insensitive comparison. 
-func (o *Option) IsKey(key string) bool { - return strings.EqualFold(o.Key, key) -} - -func (opts Options) GoString() string { - var strs []string - for _, opt := range opts { - strs = append(strs, fmt.Sprintf("%#v", opt)) - } - - return strings.Join(strs, ", ") -} - -// Get gets the value for the given key if set, -// otherwise it returns the empty string. -// -// Note that there is no difference -// -// This matches git behaviour since git v1.8.1-rc1, -// if there are multiple definitions of a key, the -// last one wins. -// -// See: http://article.gmane.org/gmane.linux.kernel/1407184 -// -// In order to get all possible values for the same key, -// use GetAll. -func (opts Options) Get(key string) string { - for i := len(opts) - 1; i >= 0; i-- { - o := opts[i] - if o.IsKey(key) { - return o.Value - } - } - return "" -} - -// Has checks if an Option exist with the given key. -func (opts Options) Has(key string) bool { - for _, o := range opts { - if o.IsKey(key) { - return true - } - } - return false -} - -// GetAll returns all possible values for the same key. -func (opts Options) GetAll(key string) []string { - result := []string{} - for _, o := range opts { - if o.IsKey(key) { - result = append(result, o.Value) - } - } - return result -} - -func (opts Options) withoutOption(key string) Options { - result := Options{} - for _, o := range opts { - if !o.IsKey(key) { - result = append(result, o) - } - } - return result -} - -func (opts Options) withAddedOption(key string, value string) Options { - return append(opts, &Option{key, value}) -} - -func (opts Options) withSettedOption(key string, values ...string) Options { - var result Options - var added []string - for _, o := range opts { - if !o.IsKey(key) { - result = append(result, o) - continue - } - - if contains(values, o.Value) { - added = append(added, o.Value) - result = append(result, o) - continue - } - } - - for _, value := range values { - if contains(added, value) { - continue - } - - result = result.withAddedOption(key, value) - } - - return result -} - -func contains(haystack []string, needle string) bool { - for _, s := range haystack { - if s == needle { - return true - } - } - - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go deleted file mode 100644 index 07f72f35af2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go +++ /dev/null @@ -1,181 +0,0 @@ -package config - -import ( - "fmt" - "strings" -) - -// Section is the representation of a section inside git configuration files. -// Each Section contains Options that are used by both the Git plumbing -// and the porcelains. -// Sections can be further divided into subsections. To begin a subsection -// put its name in double quotes, separated by space from the section name, -// in the section header, like in the example below: -// -// [section "subsection"] -// -// All the other lines (and the remainder of the line after the section header) -// are recognized as option variables, in the form "name = value" (or just name, -// which is a short-hand to say that the variable is the boolean "true"). 
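Get's last-definition-wins rule, versus GetAll returning every definition, is easiest to see with a multivalued key. A short sketch using the same package (include.path chosen only because it is a typical multivalued key):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	cfg := config.New()
	// Each AddOption appends another definition of the same key.
	cfg.AddOption("include", config.NoSubsection, "path", "/path/to/foo.inc")
	cfg.AddOption("include", config.NoSubsection, "path", "~/foo")

	opts := cfg.Section("include").Options
	fmt.Println(opts.Get("path"))    // ~/foo (the last definition wins)
	fmt.Println(opts.GetAll("path")) // [/path/to/foo.inc ~/foo]
}
```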
-// The variable names are case-insensitive, allow only alphanumeric characters -// and -, and must start with an alphabetic character: -// -// [section "subsection1"] -// option1 = value1 -// option2 -// [section "subsection2"] -// option3 = value2 -// -type Section struct { - Name string - Options Options - Subsections Subsections -} - -type Subsection struct { - Name string - Options Options -} - -type Sections []*Section - -func (s Sections) GoString() string { - var strs []string - for _, ss := range s { - strs = append(strs, fmt.Sprintf("%#v", ss)) - } - - return strings.Join(strs, ", ") -} - -type Subsections []*Subsection - -func (s Subsections) GoString() string { - var strs []string - for _, ss := range s { - strs = append(strs, fmt.Sprintf("%#v", ss)) - } - - return strings.Join(strs, ", ") -} - -// IsName checks if the name provided is equals to the Section name, case insensitive. -func (s *Section) IsName(name string) bool { - return strings.EqualFold(s.Name, name) -} - -// Subsection returns a Subsection from the specified Section. If the -// Subsection does not exists, new one is created and added to Section. -func (s *Section) Subsection(name string) *Subsection { - for i := len(s.Subsections) - 1; i >= 0; i-- { - ss := s.Subsections[i] - if ss.IsName(name) { - return ss - } - } - - ss := &Subsection{Name: name} - s.Subsections = append(s.Subsections, ss) - return ss -} - -// HasSubsection checks if the Section has a Subsection with the specified name. -func (s *Section) HasSubsection(name string) bool { - for _, ss := range s.Subsections { - if ss.IsName(name) { - return true - } - } - - return false -} - -// RemoveSubsection removes a subsection from a Section. -func (s *Section) RemoveSubsection(name string) *Section { - result := Subsections{} - for _, s := range s.Subsections { - if !s.IsName(name) { - result = append(result, s) - } - } - - s.Subsections = result - return s -} - -// Option return the value for the specified key. Empty string is returned if -// key does not exists. -func (s *Section) Option(key string) string { - return s.Options.Get(key) -} - -// OptionAll returns all possible values for an option with the specified key. -// If the option does not exists, an empty slice will be returned. -func (s *Section) OptionAll(key string) []string { - return s.Options.GetAll(key) -} - -// HasOption checks if the Section has an Option with the given key. -func (s *Section) HasOption(key string) bool { - return s.Options.Has(key) -} - -// AddOption adds a new Option to the Section. The updated Section is returned. -func (s *Section) AddOption(key string, value string) *Section { - s.Options = s.Options.withAddedOption(key, value) - return s -} - -// SetOption adds a new Option to the Section. If the option already exists, is replaced. -// The updated Section is returned. -func (s *Section) SetOption(key string, value string) *Section { - s.Options = s.Options.withSettedOption(key, value) - return s -} - -// Remove an option with the specified key. The updated Section is returned. -func (s *Section) RemoveOption(key string) *Section { - s.Options = s.Options.withoutOption(key) - return s -} - -// IsName checks if the name of the subsection is exactly the specified name. -func (s *Subsection) IsName(name string) bool { - return s.Name == name -} - -// Option returns an option with the specified key. If the option does not exists, -// empty spring will be returned. 
-func (s *Subsection) Option(key string) string { - return s.Options.Get(key) -} - -// OptionAll returns all possible values for an option with the specified key. -// If the option does not exists, an empty slice will be returned. -func (s *Subsection) OptionAll(key string) []string { - return s.Options.GetAll(key) -} - -// HasOption checks if the Subsection has an Option with the given key. -func (s *Subsection) HasOption(key string) bool { - return s.Options.Has(key) -} - -// AddOption adds a new Option to the Subsection. The updated Subsection is returned. -func (s *Subsection) AddOption(key string, value string) *Subsection { - s.Options = s.Options.withAddedOption(key, value) - return s -} - -// SetOption adds a new Option to the Subsection. If the option already exists, is replaced. -// The updated Subsection is returned. -func (s *Subsection) SetOption(key string, value ...string) *Subsection { - s.Options = s.Options.withSettedOption(key, value...) - return s -} - -// RemoveOption removes the option with the specified key. The updated Subsection is returned. -func (s *Subsection) RemoveOption(key string) *Subsection { - s.Options = s.Options.withoutOption(key) - return s -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go deleted file mode 100644 index 6fd4158462d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go +++ /dev/null @@ -1,97 +0,0 @@ -package diff - -import "github.com/go-git/go-git/v5/plumbing/color" - -// A ColorKey is a key into a ColorConfig map and also equal to the key in the -// diff.color subsection of the config. See -// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106. -type ColorKey string - -// ColorKeys. -const ( - Context ColorKey = "context" - Meta ColorKey = "meta" - Frag ColorKey = "frag" - Old ColorKey = "old" - New ColorKey = "new" - Commit ColorKey = "commit" - Whitespace ColorKey = "whitespace" - Func ColorKey = "func" - OldMoved ColorKey = "oldMoved" - OldMovedAlternative ColorKey = "oldMovedAlternative" - OldMovedDimmed ColorKey = "oldMovedDimmed" - OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed" - NewMoved ColorKey = "newMoved" - NewMovedAlternative ColorKey = "newMovedAlternative" - NewMovedDimmed ColorKey = "newMovedDimmed" - NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed" - ContextDimmed ColorKey = "contextDimmed" - OldDimmed ColorKey = "oldDimmed" - NewDimmed ColorKey = "newDimmed" - ContextBold ColorKey = "contextBold" - OldBold ColorKey = "oldBold" - NewBold ColorKey = "newBold" -) - -// A ColorConfig is a color configuration. A nil or empty ColorConfig -// corresponds to no color. -type ColorConfig map[ColorKey]string - -// A ColorConfigOption sets an option on a ColorConfig. -type ColorConfigOption func(ColorConfig) - -// WithColor sets the color for key. -func WithColor(key ColorKey, color string) ColorConfigOption { - return func(cc ColorConfig) { - cc[key] = color - } -} - -// defaultColorConfig is the default color configuration. See -// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81. 
-var defaultColorConfig = ColorConfig{ - Context: color.Normal, - Meta: color.Bold, - Frag: color.Cyan, - Old: color.Red, - New: color.Green, - Commit: color.Yellow, - Whitespace: color.BgRed, - Func: color.Normal, - OldMoved: color.BoldMagenta, - OldMovedAlternative: color.BoldBlue, - OldMovedDimmed: color.Faint, - OldMovedAlternativeDimmed: color.FaintItalic, - NewMoved: color.BoldCyan, - NewMovedAlternative: color.BoldYellow, - NewMovedDimmed: color.Faint, - NewMovedAlternativeDimmed: color.FaintItalic, - ContextDimmed: color.Faint, - OldDimmed: color.FaintRed, - NewDimmed: color.FaintGreen, - ContextBold: color.Bold, - OldBold: color.BoldRed, - NewBold: color.BoldGreen, -} - -// NewColorConfig returns a new ColorConfig. -func NewColorConfig(options ...ColorConfigOption) ColorConfig { - cc := make(ColorConfig) - for key, value := range defaultColorConfig { - cc[key] = value - } - for _, option := range options { - option(cc) - } - return cc -} - -// Reset returns the ANSI escape sequence to reset the color with key set from -// cc. If no color was set then no reset is needed so it returns the empty -// string. -func (cc ColorConfig) Reset(key ColorKey) string { - if cc[key] == "" { - return "" - } - return color.Reset -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go deleted file mode 100644 index 39a66a1a806..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go +++ /dev/null @@ -1,58 +0,0 @@ -package diff - -import ( - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" -) - -// Operation defines the operation of a diff item. -type Operation int - -const ( - // Equal item represents a equals diff. - Equal Operation = iota - // Add item represents an insert diff. - Add - // Delete item represents a delete diff. - Delete -) - -// Patch represents a collection of steps to transform several files. -type Patch interface { - // FilePatches returns a slice of patches per file. - FilePatches() []FilePatch - // Message returns an optional message that can be at the top of the - // Patch representation. - Message() string -} - -// FilePatch represents the necessary steps to transform one file to another. -type FilePatch interface { - // IsBinary returns true if this patch is representing a binary file. - IsBinary() bool - // Files returns the from and to Files, with all the necessary metadata to - // about them. If the patch creates a new file, "from" will be nil. - // If the patch deletes a file, "to" will be nil. - Files() (from, to File) - // Chunks returns a slice of ordered changes to transform "from" File to - // "to" File. If the file is a binary one, Chunks will be empty. - Chunks() []Chunk -} - -// File contains all the file metadata necessary to print some patch formats. -type File interface { - // Hash returns the File Hash. - Hash() plumbing.Hash - // Mode returns the FileMode. - Mode() filemode.FileMode - // Path returns the complete Path to the file, including the filename. - Path() string -} - -// Chunk represents a portion of a file transformation to another. -type Chunk interface { - // Content contains the portion of the file. - Content() string - // Type contains the Operation to do with this Chunk. 
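NewColorConfig starts from defaultColorConfig and applies options on top, so overriding a single key takes one functional option. A hedged sketch against the upstream module:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/color"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	// Default palette with one override: added lines in bold green.
	cc := diff.NewColorConfig(diff.WithColor(diff.New, color.BoldGreen))

	fmt.Printf("%q\n", cc[diff.New])           // "\x1b[1;32m"
	fmt.Printf("%q\n", cc.Reset(diff.New))     // "\x1b[m"
	fmt.Printf("%q\n", cc.Reset(diff.Context)) // "": no color set, no reset needed
}
```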
- Type() Operation -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go deleted file mode 100644 index fa605b1985e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go +++ /dev/null @@ -1,395 +0,0 @@ -package diff - -import ( - "fmt" - "io" - "regexp" - "strconv" - "strings" - - "github.com/go-git/go-git/v5/plumbing" -) - -// DefaultContextLines is the default number of context lines. -const DefaultContextLines = 3 - -var ( - splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`) - - operationChar = map[Operation]byte{ - Add: '+', - Delete: '-', - Equal: ' ', - } - - operationColorKey = map[Operation]ColorKey{ - Add: New, - Delete: Old, - Equal: Context, - } -) - -// UnifiedEncoder encodes an unified diff into the provided Writer. It does not -// support similarity index for renames or sorting hash representations. -type UnifiedEncoder struct { - io.Writer - - // contextLines is the count of unchanged lines that will appear surrounding - // a change. - contextLines int - - // srcPrefix and dstPrefix are prepended to file paths when encoding a diff. - srcPrefix string - dstPrefix string - - // colorConfig is the color configuration. The default is no color. - color ColorConfig -} - -// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w. -func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder { - return &UnifiedEncoder{ - Writer: w, - srcPrefix: "a/", - dstPrefix: "b/", - contextLines: contextLines, - } -} - -// SetColor sets e's color configuration and returns e. -func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder { - e.color = colorConfig - return e -} - -// SetSrcPrefix sets e's srcPrefix and returns e. -func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder { - e.srcPrefix = prefix - return e -} - -// SetDstPrefix sets e's dstPrefix and returns e. -func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder { - e.dstPrefix = prefix - return e -} - -// Encode encodes patch. 
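Encode accepts anything satisfying the Patch interface above; in practice that is usually an object.Patch produced from two commits. A rough sketch, assuming a repository in the current directory whose HEAD commit has a parent:

```go
package main

import (
	"os"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	parent, err := commit.Parent(0)
	if err != nil {
		panic(err) // HEAD has no parent commit
	}

	// object.Patch implements the diff.Patch interface.
	patch, err := parent.Patch(commit)
	if err != nil {
		panic(err)
	}

	enc := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines).
		SetColor(diff.NewColorConfig())
	if err := enc.Encode(patch); err != nil {
		panic(err)
	}
}
```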
-func (e *UnifiedEncoder) Encode(patch Patch) error { - sb := &strings.Builder{} - - if message := patch.Message(); message != "" { - sb.WriteString(message) - if !strings.HasSuffix(message, "\n") { - sb.WriteByte('\n') - } - } - - for _, filePatch := range patch.FilePatches() { - e.writeFilePatchHeader(sb, filePatch) - g := newHunksGenerator(filePatch.Chunks(), e.contextLines) - for _, hunk := range g.Generate() { - hunk.writeTo(sb, e.color) - } - } - - _, err := e.Write([]byte(sb.String())) - return err -} - -func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) { - from, to := filePatch.Files() - if from == nil && to == nil { - return - } - isBinary := filePatch.IsBinary() - - var lines []string - switch { - case from != nil && to != nil: - hashEquals := from.Hash() == to.Hash() - lines = append(lines, - fmt.Sprintf("diff --git %s%s %s%s", - e.srcPrefix, from.Path(), e.dstPrefix, to.Path()), - ) - if from.Mode() != to.Mode() { - lines = append(lines, - fmt.Sprintf("old mode %o", from.Mode()), - fmt.Sprintf("new mode %o", to.Mode()), - ) - } - if from.Path() != to.Path() { - lines = append(lines, - fmt.Sprintf("rename from %s", from.Path()), - fmt.Sprintf("rename to %s", to.Path()), - ) - } - if from.Mode() != to.Mode() && !hashEquals { - lines = append(lines, - fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()), - ) - } else if !hashEquals { - lines = append(lines, - fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()), - ) - } - if !hashEquals { - lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary) - } - case from == nil: - lines = append(lines, - fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()), - fmt.Sprintf("new file mode %o", to.Mode()), - fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()), - ) - lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary) - case to == nil: - lines = append(lines, - fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()), - fmt.Sprintf("deleted file mode %o", from.Mode()), - fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash), - ) - lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary) - } - - sb.WriteString(e.color[Meta]) - sb.WriteString(lines[0]) - for _, line := range lines[1:] { - sb.WriteByte('\n') - sb.WriteString(line) - } - sb.WriteString(e.color.Reset(Meta)) - sb.WriteByte('\n') -} - -func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string { - if isBinary { - return append(lines, - fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath), - ) - } - return append(lines, - fmt.Sprintf("--- %s", fromPath), - fmt.Sprintf("+++ %s", toPath), - ) -} - -type hunksGenerator struct { - fromLine, toLine int - ctxLines int - chunks []Chunk - current *hunk - hunks []*hunk - beforeContext, afterContext []string -} - -func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator { - return &hunksGenerator{ - chunks: chunks, - ctxLines: ctxLines, - } -} - -func (g *hunksGenerator) Generate() []*hunk { - for i, chunk := range g.chunks { - lines := splitLines(chunk.Content()) - nLines := len(lines) - - switch chunk.Type() { - case Equal: - g.fromLine += nLines - g.toLine += nLines - g.processEqualsLines(lines, i) - case Delete: - if nLines != 0 { - g.fromLine++ - } - - g.processHunk(i, chunk.Type()) - g.fromLine += nLines - 1 - g.current.AddOp(chunk.Type(), lines...) 
- case Add: - if nLines != 0 { - g.toLine++ - } - g.processHunk(i, chunk.Type()) - g.toLine += nLines - 1 - g.current.AddOp(chunk.Type(), lines...) - } - - if i == len(g.chunks)-1 && g.current != nil { - g.hunks = append(g.hunks, g.current) - } - } - - return g.hunks -} - -func (g *hunksGenerator) processHunk(i int, op Operation) { - if g.current != nil { - return - } - - var ctxPrefix string - linesBefore := len(g.beforeContext) - if linesBefore > g.ctxLines { - ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1] - g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:] - linesBefore = g.ctxLines - } - - g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")} - g.current.AddOp(Equal, g.beforeContext...) - - switch op { - case Delete: - g.current.fromLine, g.current.toLine = - g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add) - case Add: - g.current.toLine, g.current.fromLine = - g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete) - } - - g.beforeContext = nil -} - -// addLineNumbers obtains the line numbers in a new chunk. -func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) { - cla = la - linesBefore - // we need to search for a reference for the next diff - switch { - case linesBefore != 0 && g.ctxLines != 0: - if lb > g.ctxLines { - clb = lb - g.ctxLines + 1 - } else { - clb = 1 - } - case g.ctxLines == 0: - clb = lb - case i != len(g.chunks)-1: - next := g.chunks[i+1] - if next.Type() == op || next.Type() == Equal { - // this diff will be into this chunk - clb = lb + 1 - } - } - - return -} - -func (g *hunksGenerator) processEqualsLines(ls []string, i int) { - if g.current == nil { - g.beforeContext = append(g.beforeContext, ls...) - return - } - - g.afterContext = append(g.afterContext, ls...) - if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 { - g.current.AddOp(Equal, g.afterContext...) - g.afterContext = nil - } else { - ctxLines := g.ctxLines - if ctxLines > len(g.afterContext) { - ctxLines = len(g.afterContext) - } - g.current.AddOp(Equal, g.afterContext[:ctxLines]...) 
- g.hunks = append(g.hunks, g.current) - - g.current = nil - g.beforeContext = g.afterContext[ctxLines:] - g.afterContext = nil - } -} - -func splitLines(s string) []string { - out := splitLinesRegexp.FindAllString(s, -1) - if out[len(out)-1] == "" { - out = out[:len(out)-1] - } - return out -} - -type hunk struct { - fromLine int - toLine int - - fromCount int - toCount int - - ctxPrefix string - ops []*op -} - -func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) { - sb.WriteString(color[Frag]) - sb.WriteString("@@ -") - - if h.fromCount == 1 { - sb.WriteString(strconv.Itoa(h.fromLine)) - } else { - sb.WriteString(strconv.Itoa(h.fromLine)) - sb.WriteByte(',') - sb.WriteString(strconv.Itoa(h.fromCount)) - } - - sb.WriteString(" +") - - if h.toCount == 1 { - sb.WriteString(strconv.Itoa(h.toLine)) - } else { - sb.WriteString(strconv.Itoa(h.toLine)) - sb.WriteByte(',') - sb.WriteString(strconv.Itoa(h.toCount)) - } - - sb.WriteString(" @@") - sb.WriteString(color.Reset(Frag)) - - if h.ctxPrefix != "" { - sb.WriteByte(' ') - sb.WriteString(color[Func]) - sb.WriteString(h.ctxPrefix) - sb.WriteString(color.Reset(Func)) - } - - sb.WriteByte('\n') - - for _, op := range h.ops { - op.writeTo(sb, color) - } -} - -func (h *hunk) AddOp(t Operation, ss ...string) { - n := len(ss) - switch t { - case Add: - h.toCount += n - case Delete: - h.fromCount += n - case Equal: - h.toCount += n - h.fromCount += n - } - - for _, s := range ss { - h.ops = append(h.ops, &op{s, t}) - } -} - -type op struct { - text string - t Operation -} - -func (o *op) writeTo(sb *strings.Builder, color ColorConfig) { - colorKey := operationColorKey[o.t] - sb.WriteString(color[colorKey]) - sb.WriteByte(operationChar[o.t]) - if strings.HasSuffix(o.text, "\n") { - sb.WriteString(strings.TrimSuffix(o.text, "\n")) - } else { - sb.WriteString(o.text + "\n\\ No newline at end of file") - } - sb.WriteString(color.Reset(colorKey)) - sb.WriteByte('\n') -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go deleted file mode 100644 index 7cea50cd8fb..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ /dev/null @@ -1,135 +0,0 @@ -package gitignore - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "strings" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing/format/config" - gioutil "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - commentPrefix = "#" - coreSection = "core" - excludesfile = "excludesfile" - gitDir = ".git" - gitignoreFile = ".gitignore" - gitconfigFile = ".gitconfig" - systemFile = "/etc/gitconfig" -) - -// readIgnoreFile reads a specific git ignore file. -func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { - f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) - if err == nil { - defer f.Close() - - scanner := bufio.NewScanner(f) - for scanner.Scan() { - s := scanner.Text() - if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 { - ps = append(ps, ParsePattern(s, path)) - } - } - } else if !os.IsNotExist(err) { - return nil, err - } - - return -} - -// ReadPatterns reads gitignore patterns recursively traversing through the directory -// structure. The result is in the ascending order of priority (last higher). 
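ReadPatterns walks a billy filesystem rooted at the worktree, collecting every .gitignore on the way down. A rough sketch, assuming the go-billy osfs adapter and a placeholder repository path /path/to/repo; the match result naturally depends on that repository's ignore files:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
)

func main() {
	// Root the filesystem at the worktree; a nil path starts at its root.
	fs := osfs.New("/path/to/repo")

	ps, err := gitignore.ReadPatterns(fs, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("collected %d gitignore patterns\n", len(ps))

	// Whether a path is ignored depends on the patterns just collected.
	m := gitignore.NewMatcher(ps)
	fmt.Println(m.Match([]string{"node_modules", "x.js"}, false))
}
```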
-func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) { - ps, _ = readIgnoreFile(fs, path, gitignoreFile) - - var fis []os.FileInfo - fis, err = fs.ReadDir(fs.Join(path...)) - if err != nil { - return - } - - for _, fi := range fis { - if fi.IsDir() && fi.Name() != gitDir { - var subps []Pattern - subps, err = ReadPatterns(fs, append(path, fi.Name())) - if err != nil { - return - } - - if len(subps) > 0 { - ps = append(ps, subps...) - } - } - } - - return -} - -func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { - f, err := fs.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer gioutil.CheckClose(f, &err) - - b, err := ioutil.ReadAll(f) - if err != nil { - return - } - - d := config.NewDecoder(bytes.NewBuffer(b)) - - raw := config.New() - if err = d.Decode(raw); err != nil { - return - } - - s := raw.Section(coreSection) - efo := s.Options.Get(excludesfile) - if efo == "" { - return nil, nil - } - - ps, err = readIgnoreFile(fs, nil, efo) - if os.IsNotExist(err) { - return nil, nil - } - - return -} - -// LoadGlobalPatterns loads gitignore patterns from from the gitignore file -// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not -// exist the function will return nil. If the core.excludesfile property -// is not declared, the function will return nil. If the file pointed to by -// the core.excludesfile property does not exist, the function will return nil. -// -// The function assumes fs is rooted at the root filesystem. -func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - home, err := os.UserHomeDir() - if err != nil { - return - } - - return loadPatterns(fs, fs.Join(home, gitconfigFile)) -} - -// LoadSystemPatterns loads gitignore patterns from from the gitignore file -// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does -// not exist the function will return nil. If the core.excludesfile property -// is not declared, the function will return nil. If the file pointed to by -// the core.excludesfile property does not exist, the function will return nil. -// -// The function assumes fs is rooted at the root filesystem. -func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) { - return loadPatterns(fs, systemFile) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go deleted file mode 100644 index eecd4baccb2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go +++ /dev/null @@ -1,70 +0,0 @@ -// Package gitignore implements matching file system paths to gitignore patterns that -// can be automatically read from a git repository tree in the order of definition -// priorities. It support all pattern formats as specified in the original gitignore -// documentation, copied below: -// -// Pattern format -// ============== -// -// - A blank line matches no files, so it can serve as a separator for readability. -// -// - A line starting with # serves as a comment. Put a backslash ("\") in front of -// the first hash for patterns that begin with a hash. -// -// - Trailing spaces are ignored unless they are quoted with backslash ("\"). -// -// - An optional prefix "!" which negates the pattern; any matching file excluded -// by a previous pattern will become included again. 
It is not possible to -// re-include a file if a parent directory of that file is excluded. -// Git doesn’t list excluded directories for performance reasons, so -// any patterns on contained files have no effect, no matter where they are -// defined. Put a backslash ("\") in front of the first "!" for patterns -// that begin with a literal "!", for example, "\!important!.txt". -// -// - If the pattern ends with a slash, it is removed for the purpose of the -// following description, but it would only find a match with a directory. -// In other words, foo/ will match a directory foo and paths underneath it, -// but will not match a regular file or a symbolic link foo (this is consistent -// with the way how pathspec works in general in Git). -// -// - If the pattern does not contain a slash /, Git treats it as a shell glob -// pattern and checks for a match against the pathname relative to the location -// of the .gitignore file (relative to the toplevel of the work tree if not -// from a .gitignore file). -// -// - Otherwise, Git treats the pattern as a shell glob suitable for consumption -// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will -// not match a / in the pathname. For example, "Documentation/*.html" matches -// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or -// "tools/perf/Documentation/perf.html". -// -// - A leading slash matches the beginning of the pathname. For example, -// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c". -// -// Two consecutive asterisks ("**") in patterns matched against full pathname -// may have special meaning: -// -// - A leading "**" followed by a slash means match in all directories. -// For example, "**/foo" matches file or directory "foo" anywhere, the same as -// pattern "foo". "**/foo/bar" matches file or directory "bar" -// anywhere that is directly under directory "foo". -// -// - A trailing "/**" matches everything inside. For example, "abc/**" matches -// all files inside directory "abc", relative to the location of the -// .gitignore file, with infinite depth. -// -// - A slash followed by two consecutive asterisks then a slash matches -// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b", -// "a/x/y/b" and so on. -// -// - Other consecutive asterisks are considered invalid. -// -// Copyright and license -// ===================== -// -// Copyright (c) Oleg Sklyar, Silvertern and source{d} -// -// The package code was donated to source{d} to include, modify and develop -// further as a part of the `go-git` project, release it on the license of -// the whole project or delete it from the project. -package gitignore diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go deleted file mode 100644 index bd1e9e2d4cf..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go +++ /dev/null @@ -1,30 +0,0 @@ -package gitignore - -// Matcher defines a global multi-pattern matcher for gitignore patterns -type Matcher interface { - // Match matches patterns in the order of priorities. As soon as an inclusion or - // exclusion is found, not further matching is performed. - Match(path []string, isDir bool) bool -} - -// NewMatcher constructs a new global matcher. Patterns must be given in the order of -// increasing priority. 
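The precedence rules above (later patterns win, "!" re-includes) can be exercised directly with ParsePattern and NewMatcher. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
)

func main() {
	// Order matters: the matcher checks later patterns first.
	ps := []gitignore.Pattern{
		gitignore.ParsePattern("*.log", nil),
		gitignore.ParsePattern("!keep.log", nil),
	}
	m := gitignore.NewMatcher(ps)

	fmt.Println(m.Match([]string{"build", "debug.log"}, false)) // true: excluded
	fmt.Println(m.Match([]string{"keep.log"}, false))           // false: re-included
	fmt.Println(m.Match([]string{"main.go"}, false))            // false: no match
}
```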
That is most generic settings files first, then the content of -// the repo .gitignore, then content of .gitignore down the path or the repo and then -// the content command line arguments. -func NewMatcher(ps []Pattern) Matcher { - return &matcher{ps} -} - -type matcher struct { - patterns []Pattern -} - -func (m *matcher) Match(path []string, isDir bool) bool { - n := len(m.patterns) - for i := n - 1; i >= 0; i-- { - if match := m.patterns[i].Match(path, isDir); match > NoMatch { - return match == Exclude - } - } - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go deleted file mode 100644 index 098cb502127..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go +++ /dev/null @@ -1,153 +0,0 @@ -package gitignore - -import ( - "path/filepath" - "strings" -) - -// MatchResult defines outcomes of a match, no match, exclusion or inclusion. -type MatchResult int - -const ( - // NoMatch defines the no match outcome of a match check - NoMatch MatchResult = iota - // Exclude defines an exclusion of a file as a result of a match check - Exclude - // Include defines an explicit inclusion of a file as a result of a match check - Include -) - -const ( - inclusionPrefix = "!" - zeroToManyDirs = "**" - patternDirSep = "/" -) - -// Pattern defines a single gitignore pattern. -type Pattern interface { - // Match matches the given path to the pattern. - Match(path []string, isDir bool) MatchResult -} - -type pattern struct { - domain []string - pattern []string - inclusion bool - dirOnly bool - isGlob bool -} - -// ParsePattern parses a gitignore pattern string into the Pattern structure. 
-func ParsePattern(p string, domain []string) Pattern { - res := pattern{domain: domain} - - if strings.HasPrefix(p, inclusionPrefix) { - res.inclusion = true - p = p[1:] - } - - if !strings.HasSuffix(p, "\\ ") { - p = strings.TrimRight(p, " ") - } - - if strings.HasSuffix(p, patternDirSep) { - res.dirOnly = true - p = p[:len(p)-1] - } - - if strings.Contains(p, patternDirSep) { - res.isGlob = true - } - - res.pattern = strings.Split(p, patternDirSep) - return &res -} - -func (p *pattern) Match(path []string, isDir bool) MatchResult { - if len(path) <= len(p.domain) { - return NoMatch - } - for i, e := range p.domain { - if path[i] != e { - return NoMatch - } - } - - path = path[len(p.domain):] - if p.isGlob && !p.globMatch(path, isDir) { - return NoMatch - } else if !p.isGlob && !p.simpleNameMatch(path, isDir) { - return NoMatch - } - - if p.inclusion { - return Include - } else { - return Exclude - } -} - -func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { - for i, name := range path { - if match, err := filepath.Match(p.pattern[0], name); err != nil { - return false - } else if !match { - continue - } - if p.dirOnly && !isDir && i == len(path)-1 { - return false - } - return true - } - return false -} - -func (p *pattern) globMatch(path []string, isDir bool) bool { - matched := false - canTraverse := false - for i, pattern := range p.pattern { - if pattern == "" { - canTraverse = false - continue - } - if pattern == zeroToManyDirs { - if i == len(p.pattern)-1 { - break - } - canTraverse = true - continue - } - if strings.Contains(pattern, zeroToManyDirs) { - return false - } - if len(path) == 0 { - return false - } - if canTraverse { - canTraverse = false - for len(path) > 0 { - e := path[0] - path = path[1:] - if match, err := filepath.Match(pattern, e); err != nil { - return false - } else if match { - matched = true - break - } else if len(path) == 0 { - // if nothing left then fail - matched = false - } - } - } else { - if match, err := filepath.Match(pattern, path[0]); err != nil || !match { - return false - } - matched = true - path = path[1:] - } - } - if matched && p.dirOnly && !isDir && len(path) == 0 { - matched = false - } - return matched -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go deleted file mode 100644 index 7768bd6505d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go +++ /dev/null @@ -1,177 +0,0 @@ -package idxfile - -import ( - "bufio" - "bytes" - "errors" - "io" - - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // ErrUnsupportedVersion is returned by Decode when the idx file version - // is not supported. - ErrUnsupportedVersion = errors.New("Unsupported version") - // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. - ErrMalformedIdxFile = errors.New("Malformed IDX file") -) - -const ( - fanout = 256 - objectIDLength = 20 -) - -// Decoder reads and decodes idx files from an input stream. -type Decoder struct { - *bufio.Reader -} - -// NewDecoder builds a new idx stream decoder, that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{bufio.NewReader(r)} -} - -// Decode reads from the stream and decode the content into the MemoryIndex struct. 
-func (d *Decoder) Decode(idx *MemoryIndex) error { - if err := validateHeader(d); err != nil { - return err - } - - flow := []func(*MemoryIndex, io.Reader) error{ - readVersion, - readFanout, - readObjectNames, - readCRC32, - readOffsets, - readChecksums, - } - - for _, f := range flow { - if err := f(idx, d); err != nil { - return err - } - } - - return nil -} - -func validateHeader(r io.Reader) error { - var h = make([]byte, 4) - if _, err := io.ReadFull(r, h); err != nil { - return err - } - - if !bytes.Equal(h, idxHeader) { - return ErrMalformedIdxFile - } - - return nil -} - -func readVersion(idx *MemoryIndex, r io.Reader) error { - v, err := binary.ReadUint32(r) - if err != nil { - return err - } - - if v > VersionSupported { - return ErrUnsupportedVersion - } - - idx.Version = v - return nil -} - -func readFanout(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - n, err := binary.ReadUint32(r) - if err != nil { - return err - } - - idx.Fanout[k] = n - idx.FanoutMapping[k] = noMapping - } - - return nil -} - -func readObjectNames(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - var buckets uint32 - if k == 0 { - buckets = idx.Fanout[k] - } else { - buckets = idx.Fanout[k] - idx.Fanout[k-1] - } - - if buckets == 0 { - continue - } - - idx.FanoutMapping[k] = len(idx.Names) - - nameLen := int(buckets * objectIDLength) - bin := make([]byte, nameLen) - if _, err := io.ReadFull(r, bin); err != nil { - return err - } - - idx.Names = append(idx.Names, bin) - idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) - idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) - } - - return nil -} - -func readCRC32(idx *MemoryIndex, r io.Reader) error { - for k := 0; k < fanout; k++ { - if pos := idx.FanoutMapping[k]; pos != noMapping { - if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { - return err - } - } - } - - return nil -} - -func readOffsets(idx *MemoryIndex, r io.Reader) error { - var o64cnt int - for k := 0; k < fanout; k++ { - if pos := idx.FanoutMapping[k]; pos != noMapping { - if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { - return err - } - - for p := 0; p < len(idx.Offset32[pos]); p += 4 { - if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { - o64cnt++ - } - } - } - } - - if o64cnt > 0 { - idx.Offset64 = make([]byte, o64cnt*8) - if _, err := io.ReadFull(r, idx.Offset64); err != nil { - return err - } - } - - return nil -} - -func readChecksums(idx *MemoryIndex, r io.Reader) error { - if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { - return err - } - - if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { - return err - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go deleted file mode 100644 index 1e628ab4a5e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go +++ /dev/null @@ -1,128 +0,0 @@ -// Package idxfile implements encoding and decoding of packfile idx files. -// -// == Original (version 1) pack-*.idx files have the following format: -// -// - The header consists of 256 4-byte network byte order -// integers. N-th entry of this table records the number of -// objects in the corresponding pack, the first byte of whose -// object name is less than or equal to N. This is called the -// 'first-level fan-out' table. -// -// - The header is followed by sorted 24-byte entries, one entry -// per object in the pack. 
Each entry is: -// -// 4-byte network byte order integer, recording where the -// object is stored in the packfile as the offset from the -// beginning. -// -// 20-byte object name. -// -// - The file is concluded with a trailer: -// -// A copy of the 20-byte SHA1 checksum at the end of -// corresponding packfile. -// -// 20-byte SHA1-checksum of all of the above. -// -// Pack Idx file: -// -// -- +--------------------------------+ -// fanout | fanout[0] = 2 (for example) |-. -// table +--------------------------------+ | -// | fanout[1] | | -// +--------------------------------+ | -// | fanout[2] | | -// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | -// | fanout[255] = total objects |---. -// -- +--------------------------------+ | | -// main | offset | | | -// index | object name 00XXXXXXXXXXXXXXXX | | | -// tab +--------------------------------+ | | -// | offset | | | -// | object name 00XXXXXXXXXXXXXXXX | | | -// +--------------------------------+<+ | -// .-| offset | | -// | | object name 01XXXXXXXXXXXXXXXX | | -// | +--------------------------------+ | -// | | offset | | -// | | object name 01XXXXXXXXXXXXXXXX | | -// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | -// | | offset | | -// | | object name FFXXXXXXXXXXXXXXXX | | -// --| +--------------------------------+<--+ -// trailer | | packfile checksum | -// | +--------------------------------+ -// | | idxfile checksum | -// | +--------------------------------+ -// .---------. -// | -// Pack file entry: <+ -// -// packed object header: -// 1-byte size extension bit (MSB) -// type (next 3 bit) -// size0 (lower 4-bit) -// n-byte sizeN (as long as MSB is set, each 7-bit) -// size0..sizeN form 4+7+7+..+7 bit integer, size0 -// is the least significant part, and sizeN is the -// most significant part. -// packed object data: -// If it is not DELTA, then deflated bytes (the size above -// is the size before compression). -// If it is REF_DELTA, then -// 20-byte base object name SHA1 (the size above is the -// size of the delta data that follows). -// delta data, deflated. -// If it is OFS_DELTA, then -// n-byte offset (see below) interpreted as a negative -// offset from the type-byte of the header of the -// ofs-delta entry (the size above is the size of -// the delta data that follows). -// delta data, deflated. -// -// offset encoding: -// n bytes with MSB set in all but the last one. -// The offset is then the number constructed by -// concatenating the lower 7 bit of each byte, and -// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) -// to the result. -// -// == Version 2 pack-*.idx files support packs larger than 4 GiB, and -// have some other reorganizations. They have the format: -// -// - A 4-byte magic number '\377tOc' which is an unreasonable -// fanout[0] value. -// -// - A 4-byte version number (= 2) -// -// - A 256-entry fan-out table just like v1. -// -// - A table of sorted 20-byte SHA1 object names. These are -// packed together without offset values to reduce the cache -// footprint of the binary search for a specific object name. -// -// - A table of 4-byte CRC32 values of the packed object data. -// This is new in v2 so compressed data can be copied directly -// from pack to pack during repacking without undetected -// data corruption. -// -// - A table of 4-byte offset values (in network byte order). -// These are usually 31-bit pack file offsets, but large -// offsets are encoded as an index into the next table with -// the msbit set. -// -// - A table of 8-byte offset entries (empty for pack files less -// than 2 GiB). 
Pack files are organized with heavily used -// objects toward the front, so most object references should -// not need to refer to this table. -// -// - The same trailer as a v1 pack file: -// -// A copy of the 20-byte SHA1 checksum at the end of -// corresponding packfile. -// -// 20-byte SHA1-checksum of all of the above. -// -// Source: -// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt -package idxfile diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go deleted file mode 100644 index 26b2e4d6b57..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go +++ /dev/null @@ -1,142 +0,0 @@ -package idxfile - -import ( - "crypto/sha1" - "hash" - "io" - - "github.com/go-git/go-git/v5/utils/binary" -) - -// Encoder writes MemoryIndex structs to an output stream. -type Encoder struct { - io.Writer - hash hash.Hash -} - -// NewEncoder returns a new stream encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() - mw := io.MultiWriter(w, h) - return &Encoder{mw, h} -} - -// Encode encodes an MemoryIndex to the encoder writer. -func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { - flow := []func(*MemoryIndex) (int, error){ - e.encodeHeader, - e.encodeFanout, - e.encodeHashes, - e.encodeCRC32, - e.encodeOffsets, - e.encodeChecksums, - } - - sz := 0 - for _, f := range flow { - i, err := f(idx) - sz += i - - if err != nil { - return sz, err - } - } - - return sz, nil -} - -func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { - c, err := e.Write(idxHeader) - if err != nil { - return c, err - } - - return c + 4, binary.WriteUint32(e, idx.Version) -} - -func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { - for _, c := range idx.Fanout { - if err := binary.WriteUint32(e, c); err != nil { - return 0, err - } - } - - return fanout * 4, nil -} - -func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.Names[pos]) - if err != nil { - return size, err - } - size += n - } - return size, nil -} - -func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.CRC32[pos]) - if err != nil { - return size, err - } - - size += n - } - - return size, nil -} - -func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { - var size int - for k := 0; k < fanout; k++ { - pos := idx.FanoutMapping[k] - if pos == noMapping { - continue - } - - n, err := e.Write(idx.Offset32[pos]) - if err != nil { - return size, err - } - - size += n - } - - if len(idx.Offset64) > 0 { - n, err := e.Write(idx.Offset64) - if err != nil { - return size, err - } - - size += n - } - - return size, nil -} - -func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { - if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { - return 0, err - } - - copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) - if _, err := e.Write(idx.IdxChecksum[:]); err != nil { - return 0, err - } - - return 40, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go deleted file mode 100644 index 
64dd8dcef4a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go +++ /dev/null @@ -1,346 +0,0 @@ -package idxfile - -import ( - "bytes" - "io" - "sort" - - encbin "encoding/binary" - - "github.com/go-git/go-git/v5/plumbing" -) - -const ( - // VersionSupported is the only idx version supported. - VersionSupported = 2 - - noMapping = -1 -) - -var ( - idxHeader = []byte{255, 't', 'O', 'c'} -) - -// Index represents an index of a packfile. -type Index interface { - // Contains checks whether the given hash is in the index. - Contains(h plumbing.Hash) (bool, error) - // FindOffset finds the offset in the packfile for the object with - // the given hash. - FindOffset(h plumbing.Hash) (int64, error) - // FindCRC32 finds the CRC32 of the object with the given hash. - FindCRC32(h plumbing.Hash) (uint32, error) - // FindHash finds the hash for the object with the given offset. - FindHash(o int64) (plumbing.Hash, error) - // Count returns the number of entries in the index. - Count() (int64, error) - // Entries returns an iterator to retrieve all index entries. - Entries() (EntryIter, error) - // EntriesByOffset returns an iterator to retrieve all index entries ordered - // by offset. - EntriesByOffset() (EntryIter, error) -} - -// MemoryIndex is the in memory representation of an idx file. -type MemoryIndex struct { - Version uint32 - Fanout [256]uint32 - // FanoutMapping maps the position in the fanout table to the position - // in the Names, Offset32 and CRC32 slices. This improves the memory - // usage by not needing an array with unnecessary empty slots. - FanoutMapping [256]int - Names [][]byte - Offset32 [][]byte - CRC32 [][]byte - Offset64 []byte - PackfileChecksum [20]byte - IdxChecksum [20]byte - - offsetHash map[int64]plumbing.Hash - offsetHashIsFull bool -} - -var _ Index = (*MemoryIndex)(nil) - -// NewMemoryIndex returns an instance of a new MemoryIndex. -func NewMemoryIndex() *MemoryIndex { - return &MemoryIndex{} -} - -func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { - k := idx.FanoutMapping[h[0]] - if k == noMapping { - return 0, false - } - - if len(idx.Names) <= k { - return 0, false - } - - data := idx.Names[k] - high := uint64(len(idx.Offset32[k])) >> 2 - if high == 0 { - return 0, false - } - - low := uint64(0) - for { - mid := (low + high) >> 1 - offset := mid * objectIDLength - - cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) - if cmp < 0 { - high = mid - } else if cmp == 0 { - return int(mid), true - } else { - low = mid + 1 - } - - if low >= high { - break - } - } - - return 0, false -} - -// Contains implements the Index interface. -func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { - _, ok := idx.findHashIndex(h) - return ok, nil -} - -// FindOffset implements the Index interface. 
-func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { - if len(idx.FanoutMapping) <= int(h[0]) { - return 0, plumbing.ErrObjectNotFound - } - - k := idx.FanoutMapping[h[0]] - i, ok := idx.findHashIndex(h) - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - offset := idx.getOffset(k, i) - - if !idx.offsetHashIsFull { - // Save the offset for reverse lookup - if idx.offsetHash == nil { - idx.offsetHash = make(map[int64]plumbing.Hash) - } - idx.offsetHash[int64(offset)] = h - } - - return int64(offset), nil -} - -const isO64Mask = uint64(1) << 31 - -func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { - offset := secondLevel << 2 - ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) - - if (uint64(ofs) & isO64Mask) != 0 { - offset := 8 * (uint64(ofs) & ^isO64Mask) - n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) - return n - } - - return uint64(ofs) -} - -// FindCRC32 implements the Index interface. -func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { - k := idx.FanoutMapping[h[0]] - i, ok := idx.findHashIndex(h) - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - return idx.getCRC32(k, i), nil -} - -func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { - offset := secondLevel << 2 - return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) -} - -// FindHash implements the Index interface. -func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { - var hash plumbing.Hash - var ok bool - - if idx.offsetHash != nil { - if hash, ok = idx.offsetHash[o]; ok { - return hash, nil - } - } - - // Lazily generate the reverse offset/hash map if required. - if !idx.offsetHashIsFull || idx.offsetHash == nil { - if err := idx.genOffsetHash(); err != nil { - return plumbing.ZeroHash, err - } - - hash, ok = idx.offsetHash[o] - } - - if !ok { - return plumbing.ZeroHash, plumbing.ErrObjectNotFound - } - - return hash, nil -} - -// genOffsetHash generates the offset/hash mapping for reverse search. -func (idx *MemoryIndex) genOffsetHash() error { - count, err := idx.Count() - if err != nil { - return err - } - - idx.offsetHash = make(map[int64]plumbing.Hash, count) - idx.offsetHashIsFull = true - - var hash plumbing.Hash - i := uint32(0) - for firstLevel, fanoutValue := range idx.Fanout { - mappedFirstLevel := idx.FanoutMapping[firstLevel] - for secondLevel := uint32(0); i < fanoutValue; i++ { - copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) - offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) - idx.offsetHash[offset] = hash - secondLevel++ - } - } - - return nil -} - -// Count implements the Index interface. -func (idx *MemoryIndex) Count() (int64, error) { - return int64(idx.Fanout[fanout-1]), nil -} - -// Entries implements the Index interface. -func (idx *MemoryIndex) Entries() (EntryIter, error) { - return &idxfileEntryIter{idx, 0, 0, 0}, nil -} - -// EntriesByOffset implements the Index interface. 
-func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { - count, err := idx.Count() - if err != nil { - return nil, err - } - - iter := &idxfileEntryOffsetIter{ - entries: make(entriesByOffset, count), - } - - entries, err := idx.Entries() - if err != nil { - return nil, err - } - - for pos := 0; int64(pos) < count; pos++ { - entry, err := entries.Next() - if err != nil { - return nil, err - } - - iter.entries[pos] = entry - } - - sort.Sort(iter.entries) - - return iter, nil -} - -// EntryIter is an iterator that will return the entries in a packfile index. -type EntryIter interface { - // Next returns the next entry in the packfile index. - Next() (*Entry, error) - // Close closes the iterator. - Close() error -} - -type idxfileEntryIter struct { - idx *MemoryIndex - total int - firstLevel, secondLevel int -} - -func (i *idxfileEntryIter) Next() (*Entry, error) { - for { - if i.firstLevel >= fanout { - return nil, io.EOF - } - - if i.total >= int(i.idx.Fanout[i.firstLevel]) { - i.firstLevel++ - i.secondLevel = 0 - continue - } - - mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel] - entry := new(Entry) - copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:]) - entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel) - entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel) - - i.secondLevel++ - i.total++ - - return entry, nil - } -} - -func (i *idxfileEntryIter) Close() error { - i.firstLevel = fanout - return nil -} - -// Entry is the in memory representation of an object entry in the idx file. -type Entry struct { - Hash plumbing.Hash - CRC32 uint32 - Offset uint64 -} - -type idxfileEntryOffsetIter struct { - entries entriesByOffset - pos int -} - -func (i *idxfileEntryOffsetIter) Next() (*Entry, error) { - if i.pos >= len(i.entries) { - return nil, io.EOF - } - - entry := i.entries[i.pos] - i.pos++ - - return entry, nil -} - -func (i *idxfileEntryOffsetIter) Close() error { - i.pos = len(i.entries) + 1 - return nil -} - -type entriesByOffset []*Entry - -func (o entriesByOffset) Len() int { - return len(o) -} - -func (o entriesByOffset) Less(i int, j int) bool { - return o[i].Offset < o[j].Offset -} - -func (o entriesByOffset) Swap(i int, j int) { - o[i], o[j] = o[j], o[i] -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go deleted file mode 100644 index daa160502ae..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go +++ /dev/null @@ -1,186 +0,0 @@ -package idxfile - -import ( - "bytes" - "fmt" - "math" - "sort" - "sync" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/binary" -) - -// objects implements sort.Interface and uses hash as sorting key. -type objects []Entry - -// Writer implements a packfile Observer interface and is used to generate -// indexes. -type Writer struct { - m sync.Mutex - - count uint32 - checksum plumbing.Hash - objects objects - offset64 uint32 - finished bool - index *MemoryIndex - added map[plumbing.Hash]struct{} -} - -// Index returns a previously created MemoryIndex or creates a new one if -// needed. -func (w *Writer) Index() (*MemoryIndex, error) { - w.m.Lock() - defer w.m.Unlock() - - if w.index == nil { - return w.createIndex() - } - - return w.index, nil -} - -// Add appends new object data. 
-func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) { - w.m.Lock() - defer w.m.Unlock() - - if w.added == nil { - w.added = make(map[plumbing.Hash]struct{}) - } - - if _, ok := w.added[h]; !ok { - w.added[h] = struct{}{} - w.objects = append(w.objects, Entry{h, crc, pos}) - } - -} - -func (w *Writer) Finished() bool { - return w.finished -} - -// OnHeader implements packfile.Observer interface. -func (w *Writer) OnHeader(count uint32) error { - w.count = count - w.objects = make(objects, 0, count) - return nil -} - -// OnInflatedObjectHeader implements packfile.Observer interface. -func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error { - return nil -} - -// OnInflatedObjectContent implements packfile.Observer interface. -func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error { - w.Add(h, uint64(pos), crc) - return nil -} - -// OnFooter implements packfile.Observer interface. -func (w *Writer) OnFooter(h plumbing.Hash) error { - w.checksum = h - w.finished = true - _, err := w.createIndex() - if err != nil { - return err - } - - return nil -} - -// createIndex returns a filled MemoryIndex with the information filled by -// the observer callbacks. -func (w *Writer) createIndex() (*MemoryIndex, error) { - if !w.finished { - return nil, fmt.Errorf("the index still hasn't finished building") - } - - idx := new(MemoryIndex) - w.index = idx - - sort.Sort(w.objects) - - // unmap all fans by default - for i := range idx.FanoutMapping { - idx.FanoutMapping[i] = noMapping - } - - buf := new(bytes.Buffer) - - last := -1 - bucket := -1 - for i, o := range w.objects { - fan := o.Hash[0] - - // fill the gaps between fans - for j := last + 1; j < int(fan); j++ { - idx.Fanout[j] = uint32(i) - } - - // update the number of objects for this position - idx.Fanout[fan] = uint32(i + 1) - - // we move from one bucket to another, update counters and allocate - // memory - if last != int(fan) { - bucket++ - idx.FanoutMapping[fan] = bucket - last = int(fan) - - idx.Names = append(idx.Names, make([]byte, 0)) - idx.Offset32 = append(idx.Offset32, make([]byte, 0)) - idx.CRC32 = append(idx.CRC32, make([]byte, 0)) - } - - idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) - - offset := o.Offset - if offset > math.MaxInt32 { - offset = w.addOffset64(offset) - } - - buf.Truncate(0) - binary.WriteUint32(buf, uint32(offset)) - idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) - - buf.Truncate(0) - binary.WriteUint32(buf, o.CRC32) - idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) - } - - for j := last + 1; j < 256; j++ { - idx.Fanout[j] = uint32(len(w.objects)) - } - - idx.Version = VersionSupported - idx.PackfileChecksum = w.checksum - - return idx, nil -} - -func (w *Writer) addOffset64(pos uint64) uint64 { - buf := new(bytes.Buffer) - binary.WriteUint64(buf, pos) - w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...)
- - index := uint64(w.offset64 | (1 << 31)) - w.offset64++ - - return index -} - -func (o objects) Len() int { - return len(o) -} - -func (o objects) Less(i int, j int) bool { - cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) - return cmp < 0 -} - -func (o objects) Swap(i int, j int) { - o[i], o[j] = o[j], o[i] -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go deleted file mode 100644 index 036b6365e6c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go +++ /dev/null @@ -1,479 +0,0 @@ -package index - -import ( - "bufio" - "bytes" - "crypto/sha1" - "errors" - "hash" - "io" - "io/ioutil" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // DecodeVersionSupported is the range of supported index versions - DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} - - // ErrMalformedSignature is returned by Decode when the index header file is - // malformed - ErrMalformedSignature = errors.New("malformed index signature file") - // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with - // the read content - ErrInvalidChecksum = errors.New("invalid checksum") - - errUnknownExtension = errors.New("unknown extension") -) - -const ( - entryHeaderLength = 62 - entryExtended = 0x4000 - entryValid = 0x8000 - nameMask = 0xfff - intentToAddMask = 1 << 13 - skipWorkTreeMask = 1 << 14 -) - -// A Decoder reads and decodes index files from an input stream. -type Decoder struct { - r io.Reader - hash hash.Hash - lastEntry *Entry - - extReader *bufio.Reader -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - h := sha1.New() - return &Decoder{ - r: io.TeeReader(r, h), - hash: h, - extReader: bufio.NewReader(nil), - } -} - -// Decode reads the whole index object from its input and stores it in the -// value pointed to by idx. 
-func (d *Decoder) Decode(idx *Index) error { - var err error - idx.Version, err = validateHeader(d.r) - if err != nil { - return err - } - - entryCount, err := binary.ReadUint32(d.r) - if err != nil { - return err - } - - if err := d.readEntries(idx, int(entryCount)); err != nil { - return err - } - - return d.readExtensions(idx) -} - -func (d *Decoder) readEntries(idx *Index, count int) error { - for i := 0; i < count; i++ { - e, err := d.readEntry(idx) - if err != nil { - return err - } - - d.lastEntry = e - idx.Entries = append(idx.Entries, e) - } - - return nil -} - -func (d *Decoder) readEntry(idx *Index) (*Entry, error) { - e := &Entry{} - - var msec, mnsec, sec, nsec uint32 - var flags uint16 - - flow := []interface{}{ - &sec, &nsec, - &msec, &mnsec, - &e.Dev, - &e.Inode, - &e.Mode, - &e.UID, - &e.GID, - &e.Size, - &e.Hash, - &flags, - } - - if err := binary.Read(d.r, flow...); err != nil { - return nil, err - } - - read := entryHeaderLength - - if sec != 0 || nsec != 0 { - e.CreatedAt = time.Unix(int64(sec), int64(nsec)) - } - - if msec != 0 || mnsec != 0 { - e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) - } - - e.Stage = Stage(flags>>12) & 0x3 - - if flags&entryExtended != 0 { - extended, err := binary.ReadUint16(d.r) - if err != nil { - return nil, err - } - - read += 2 - e.IntentToAdd = extended&intentToAddMask != 0 - e.SkipWorktree = extended&skipWorkTreeMask != 0 - } - - if err := d.readEntryName(idx, e, flags); err != nil { - return nil, err - } - - return e, d.padEntry(idx, e, read) -} - -func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { - var name string - var err error - - switch idx.Version { - case 2, 3: - len := flags & nameMask - name, err = d.doReadEntryName(len) - case 4: - name, err = d.doReadEntryNameV4() - default: - return ErrUnsupportedVersion - } - - if err != nil { - return err - } - - e.Name = name - return nil -} - -func (d *Decoder) doReadEntryNameV4() (string, error) { - l, err := binary.ReadVariableWidthInt(d.r) - if err != nil { - return "", err - } - - var base string - if d.lastEntry != nil { - base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] - } - - name, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return "", err - } - - return base + string(name), nil -} - -func (d *Decoder) doReadEntryName(len uint16) (string, error) { - name := make([]byte, len) - _, err := io.ReadFull(d.r, name) - - return string(name), err -} - -// Index entries are padded out to the next 8 byte alignment -// for historical reasons related to how C Git read the files. 
-func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { - if idx.Version == 4 { - return nil - } - - entrySize := read + len(e.Name) - padLen := 8 - entrySize%8 - _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) - return err -} - -func (d *Decoder) readExtensions(idx *Index) error { - // TODO: support 'Split index' and 'Untracked cache' extensions, take in - // count that they are not supported by jgit or libgit - - var expected []byte - var err error - - var header [4]byte - for { - expected = d.hash.Sum(nil) - - var n int - if n, err = io.ReadFull(d.r, header[:]); err != nil { - if n == 0 { - err = io.EOF - } - - break - } - - err = d.readExtension(idx, header[:]) - if err != nil { - break - } - } - - if err != errUnknownExtension { - return err - } - - return d.readChecksum(expected, header) -} - -func (d *Decoder) readExtension(idx *Index, header []byte) error { - switch { - case bytes.Equal(header, treeExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.Cache = &Tree{} - d := &treeExtensionDecoder{r} - if err := d.Decode(idx.Cache); err != nil { - return err - } - case bytes.Equal(header, resolveUndoExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.ResolveUndo = &ResolveUndo{} - d := &resolveUndoDecoder{r} - if err := d.Decode(idx.ResolveUndo); err != nil { - return err - } - case bytes.Equal(header, endOfIndexEntryExtSignature): - r, err := d.getExtensionReader() - if err != nil { - return err - } - - idx.EndOfIndexEntry = &EndOfIndexEntry{} - d := &endOfIndexEntryDecoder{r} - if err := d.Decode(idx.EndOfIndexEntry); err != nil { - return err - } - default: - return errUnknownExtension - } - - return nil -} - -func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { - len, err := binary.ReadUint32(d.r) - if err != nil { - return nil, err - } - - d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)}) - return d.extReader, nil -} - -func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { - var h plumbing.Hash - copy(h[:4], alreadyRead[:]) - - if _, err := io.ReadFull(d.r, h[4:]); err != nil { - return err - } - - if !bytes.Equal(h[:], expected) { - return ErrInvalidChecksum - } - - return nil -} - -func validateHeader(r io.Reader) (version uint32, err error) { - var s = make([]byte, 4) - if _, err := io.ReadFull(r, s); err != nil { - return 0, err - } - - if !bytes.Equal(s, indexSignature) { - return 0, ErrMalformedSignature - } - - version, err = binary.ReadUint32(r) - if err != nil { - return 0, err - } - - if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { - return 0, ErrUnsupportedVersion - } - - return -} - -type treeExtensionDecoder struct { - r *bufio.Reader -} - -func (d *treeExtensionDecoder) Decode(t *Tree) error { - for { - e, err := d.readEntry() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if e == nil { - continue - } - - t.Entries = append(t.Entries, *e) - } -} - -func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { - e := &TreeEntry{} - - path, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return nil, err - } - - e.Path = string(path) - - count, err := binary.ReadUntil(d.r, ' ') - if err != nil { - return nil, err - } - - i, err := strconv.Atoi(string(count)) - if err != nil { - return nil, err - } - - // An entry can be in an invalidated state and is represented by having a - // negative number in the entry_count field. 
- if i == -1 { - return nil, nil - } - - e.Entries = i - trees, err := binary.ReadUntil(d.r, '\n') - if err != nil { - return nil, err - } - - i, err = strconv.Atoi(string(trees)) - if err != nil { - return nil, err - } - - e.Trees = i - _, err = io.ReadFull(d.r, e.Hash[:]) - if err != nil { - return nil, err - } - return e, nil -} - -type resolveUndoDecoder struct { - r *bufio.Reader -} - -func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { - for { - e, err := d.readEntry() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - ru.Entries = append(ru.Entries, *e) - } -} - -func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { - e := &ResolveUndoEntry{ - Stages: make(map[Stage]plumbing.Hash), - } - - path, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return nil, err - } - - e.Path = string(path) - - for i := 0; i < 3; i++ { - if err := d.readStage(e, Stage(i+1)); err != nil { - return nil, err - } - } - - for s := range e.Stages { - var hash plumbing.Hash - if _, err := io.ReadFull(d.r, hash[:]); err != nil { - return nil, err - } - - e.Stages[s] = hash - } - - return e, nil -} - -func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { - ascii, err := binary.ReadUntil(d.r, '\x00') - if err != nil { - return err - } - - stage, err := strconv.ParseInt(string(ascii), 8, 64) - if err != nil { - return err - } - - if stage != 0 { - e.Stages[s] = plumbing.ZeroHash - } - - return nil -} - -type endOfIndexEntryDecoder struct { - r *bufio.Reader -} - -func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { - var err error - e.Offset, err = binary.ReadUint32(d.r) - if err != nil { - return err - } - - _, err = io.ReadFull(d.r, e.Hash[:]) - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go deleted file mode 100644 index 39ae6ad5f91..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go +++ /dev/null @@ -1,360 +0,0 @@ -// Package index implements encoding and decoding of index format files. -// -// Git index format -// ================ -// -// == The Git index file has the following format -// -// All binary numbers are in network byte order. Version 2 is described -// here unless stated otherwise. -// -// - A 12-byte header consisting of -// -// 4-byte signature: -// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") -// -// 4-byte version number: -// The current supported versions are 2, 3 and 4. -// -// 32-bit number of index entries. -// -// - A number of sorted index entries (see below). -// -// - Extensions -// -// Extensions are identified by signature. Optional extensions can -// be ignored if Git does not understand them. -// -// Git currently supports cached tree and resolve undo extensions. -// -// 4-byte extension signature. If the first byte is 'A'..'Z' the -// extension is optional and can be ignored. -// -// 32-bit size of the extension -// -// Extension data -// -// - 160-bit SHA-1 over the content of the index file before this -// checksum. -// -// == Index entry -// -// Index entries are sorted in ascending order on the name field, -// interpreted as a string of unsigned bytes (i.e. memcmp() order, no -// localization, no special casing of directory separator '/'). Entries -// with the same name are sorted by their stage field. 
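As a quick aside on the memcmp() ordering just described: Go's bytes.Compare implements exactly this unsigned byte-wise comparison, which is why, for example, an uppercase name sorts before its lowercase twin and "foo.txt" sorts before "foo/bar" ('.' is 0x2E, '/' is 0x2F). A minimal, self-contained illustration (the sample names are mine, not from the vendored code):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// Index entry names compared as raw unsigned bytes (memcmp order),
// with no localization and no special casing of the '/' separator.
func main() {
	names := []string{"foo/bar", "foo.txt", "foo", "Foo"}
	sort.Slice(names, func(i, j int) bool {
		return bytes.Compare([]byte(names[i]), []byte(names[j])) < 0
	})
	fmt.Println(names) // [Foo foo foo.txt foo/bar]
}
```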
-// -// 32-bit ctime seconds, the last time a file's metadata changed -// this is stat(2) data -// -// 32-bit ctime nanosecond fractions -// this is stat(2) data -// -// 32-bit mtime seconds, the last time a file's data changed -// this is stat(2) data -// -// 32-bit mtime nanosecond fractions -// this is stat(2) data -// -// 32-bit dev -// this is stat(2) data -// -// 32-bit ino -// this is stat(2) data -// -// 32-bit mode, split into (high to low bits) -// -// 4-bit object type -// valid values in binary are 1000 (regular file), 1010 (symbolic link) -// and 1110 (gitlink) -// -// 3-bit unused -// -// 9-bit unix permission. Only 0755 and 0644 are valid for regular files. -// Symbolic links and gitlinks have value 0 in this field. -// -// 32-bit uid -// this is stat(2) data -// -// 32-bit gid -// this is stat(2) data -// -// 32-bit file size -// This is the on-disk size from stat(2), truncated to 32-bit. -// -// 160-bit SHA-1 for the represented object -// -// A 16-bit 'flags' field split into (high to low bits) -// -// 1-bit assume-valid flag -// -// 1-bit extended flag (must be zero in version 2) -// -// 2-bit stage (during merge) -// -// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF -// is stored in this field. -// -// (Version 3 or later) A 16-bit field, only applicable if the -// "extended flag" above is 1, split into (high to low bits). -// -// 1-bit reserved for future -// -// 1-bit skip-worktree flag (used by sparse checkout) -// -// 1-bit intent-to-add flag (used by "git add -N") -// -// 13-bit unused, must be zero -// -// Entry path name (variable length) relative to top level directory -// (without leading slash). '/' is used as path separator. The special -// path components ".", ".." and ".git" (without quotes) are disallowed. -// Trailing slash is also disallowed. -// -// The exact encoding is undefined, but the '.' and '/' characters -// are encoded in 7-bit ASCII and the encoding cannot contain a NUL -// byte (iow, this is a UNIX pathname). -// -// (Version 4) In version 4, the entry path name is prefix-compressed -// relative to the path name for the previous entry (the very first -// entry is encoded as if the path name for the previous entry is an -// empty string). At the beginning of an entry, an integer N in the -// variable width encoding (the same encoding as the offset is encoded -// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed -// by a NUL-terminated string S. Removing N bytes from the end of the -// path name for the previous entry, and replacing it with the string S -// yields the path name for this entry. -// -// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes -// while keeping the name NUL-terminated. -// -// (Version 4) In version 4, the padding after the pathname does not -// exist. -// -// Interpretation of index entries in split index mode is completely -// different. See below for details. -// -// == Extensions -// -// === Cached tree -// -// Cached tree extension contains pre-computed hashes for trees that can -// be derived from the index. It helps speed up tree object generation -// from index for a new commit. -// -// When a path is updated in index, the path must be invalidated and -// removed from tree cache. -// -// The signature for this extension is { 'T', 'R', 'E', 'E' }. 
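Before the extension details, a minimal sketch of how the fixed-width portion of a v2 index entry listed above maps onto Go types may help: ten network-byte-order 32-bit fields, a 160-bit SHA-1 and a 16-bit flags word, 62 bytes in total, which matches the entryHeaderLength = 62 constant in the deleted decoder. The struct and function names below are illustrative, not go-git's API:

```go
package index

import (
	"encoding/binary"
	"io"
)

// rawEntry is a hypothetical view of the fixed 62-byte entry header.
type rawEntry struct {
	CTimeSec, CTimeNano uint32 // stat(2) ctime
	MTimeSec, MTimeNano uint32 // stat(2) mtime
	Dev, Ino            uint32
	Mode                uint32 // 4-bit object type, 3 unused bits, 9-bit permission
	UID, GID            uint32
	Size                uint32 // on-disk size, truncated to 32 bits
	Hash                [20]byte
	Flags               uint16 // assume-valid, extended, 2-bit stage, 12-bit name length
}

// readRawEntry decodes one fixed header; the variable-length, NUL-padded
// path name documented above follows it in the stream.
func readRawEntry(r io.Reader) (*rawEntry, error) {
	var e rawEntry
	// All binary numbers in the index are network byte order (big-endian).
	if err := binary.Read(r, binary.BigEndian, &e); err != nil {
		return nil, err
	}
	return &e, nil
}
```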
-// -// A series of entries fill the entire extension; each of which -// consists of: -// -// - NUL-terminated path component (relative to its parent directory); -// -// - ASCII decimal number of entries in the index that is covered by the -// tree this entry represents (entry_count); -// -// - A space (ASCII 32); -// -// - ASCII decimal number that represents the number of subtrees this -// tree has; -// -// - A newline (ASCII 10); and -// -// - 160-bit object name for the object that would result from writing -// this span of index as a tree. -// -// An entry can be in an invalidated state and is represented by having -// a negative number in the entry_count field. In this case, there is no -// object name and the next entry starts immediately after the newline. -// When writing an invalid entry, -1 should always be used as entry_count. -// -// The entries are written out in the top-down, depth-first order. The -// first entry represents the root level of the repository, followed by the -// first subtree--let's call this A--of the root level (with its name -// relative to the root level), followed by the first subtree of A (with -// its name relative to A), ... -// -// === Resolve undo -// -// A conflict is represented in the index as a set of higher stage entries. -// When a conflict is resolved (e.g. with "git add path"), these higher -// stage entries will be removed and a stage-0 entry with proper resolution -// is added. -// -// When these higher stage entries are removed, they are saved in the -// resolve undo extension, so that conflicts can be recreated (e.g. with -// "git checkout -m"), in case users want to redo a conflict resolution -// from scratch. -// -// The signature for this extension is { 'R', 'E', 'U', 'C' }. -// -// A series of entries fill the entire extension; each of which -// consists of: -// -// - NUL-terminated pathname the entry describes (relative to the root of -// the repository, i.e. full pathname); -// -// - Three NUL-terminated ASCII octal numbers, entry mode of entries in -// stage 1 to 3 (a missing stage is represented by "0" in this field); -// and -// -// - At most three 160-bit object names of the entry in stages from 1 to 3 -// (nothing is written for a missing stage). -// -// === Split index -// -// In split index mode, the majority of index entries could be stored -// in a separate file. This extension records the changes to be made on -// top of that to produce the final index. -// -// The signature for this extension is { 'l', 'i', 'n', 'k' }. -// -// The extension consists of: -// -// - 160-bit SHA-1 of the shared index file. The shared index file path -// is $GIT_DIR/sharedindex.. If all 160 bits are zero, the -// index does not require a shared index file. -// -// - An ewah-encoded delete bitmap, each bit represents an entry in the -// shared index. If a bit is set, its corresponding entry in the -// shared index will be removed from the final index. Note, because -// a delete operation changes index entry positions, but we do need -// original positions in replace phase, it's best to just mark -// entries for removal, then do a mass deletion after replacement. -// -// - An ewah-encoded replace bitmap, each bit represents an entry in -// the shared index. If a bit is set, its corresponding entry in the -// shared index will be replaced with an entry in this index -// file. All replaced entries are stored in sorted order in this -// index. 
The first "1" bit in the replace bitmap corresponds to the -// first index entry, the second "1" bit to the second entry and so -// on. Replaced entries may have empty path names to save space. -// -// The remaining index entries after replaced ones will be added to the -// final index. These added entries are also sorted by entry name then -// stage. -// -// == Untracked cache -// -// Untracked cache saves the untracked file list and necessary data to -// verify the cache. The signature for this extension is { 'U', 'N', -// 'T', 'R' }. -// -// The extension starts with -// -// - A sequence of NUL-terminated strings, preceded by the size of the -// sequence in variable width encoding. Each string describes the -// environment where the cache can be used. -// -// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from -// ctime field until "file size". -// -// - Stat data of plumbing.excludesfile -// -// - 32-bit dir_flags (see struct dir_struct) -// -// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file -// does not exist. -// -// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does -// not exist. -// -// - NUL-terminated string of per-dir exclude file name. This usually -// is ".gitignore". -// -// - The number of following directory blocks, variable width -// encoding. If this number is zero, the extension ends here with a -// following NUL. -// -// - A number of directory blocks in depth-first-search order, each -// consists of -// -// - The number of untracked entries, variable width encoding. -// -// - The number of sub-directory blocks, variable width encoding. -// -// - The directory name terminated by NUL. -// -// - A number of untracked file/dir names terminated by NUL. -// -// The remaining data of each directory block is grouped by type: -// -// - An ewah bitmap, the n-th bit marks whether the n-th directory has -// valid untracked cache entries. -// -// - An ewah bitmap, the n-th bit records "check-only" bit of -// read_directory_recursive() for the n-th directory. -// -// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data -// is valid for the n-th directory and exists in the next data. -// -// - An array of stat data. The n-th data corresponds with the n-th -// "one" bit in the previous ewah bitmap. -// -// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit -// in the previous ewah bitmap. -// -// - One NUL. -// -// == File System Monitor cache -// -// The file system monitor cache tracks files for which the core.fsmonitor -// hook has told us about changes. The signature for this extension is -// { 'F', 'S', 'M', 'N' }. -// -// The extension starts with -// -// - 32-bit version number: the current supported version is 1. -// -// - 64-bit time: the extension data reflects all changes through the given -// time which is stored as the nanoseconds elapsed since midnight, -// January 1, 1970. -// -// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. -// -// - An ewah bitmap, the n-th bit indicates whether the n-th index entry -// is not CE_FSMONITOR_VALID. -// -// == End of Index Entry -// -// The End of Index Entry (EOIE) is used to locate the end of the variable -// length index entries and the beginning of the extensions. Code can take -// advantage of this to quickly locate the index extensions without having -// to parse through all of the index entries. 
-// -// Because it must be able to be loaded before the variable length cache -// entries and other index extensions, this extension must be written last. -// The signature for this extension is { 'E', 'O', 'I', 'E' }. -// -// The extension consists of: -// -// - 32-bit offset to the end of the index entries -// -// - 160-bit SHA-1 over the extension types and their sizes (but not -// their contents). E.g. if we have "TREE" extension that is N-bytes -// long, "REUC" extension that is M-bytes long, followed by "EOIE", -// then the hash would be: -// -// SHA-1("TREE" + <binary representation of N> + -// "REUC" + <binary representation of M>) -// -// == Index Entry Offset Table -// -// The Index Entry Offset Table (IEOT) is used to help address the CPU -// cost of loading the index by enabling multi-threading the process of -// converting cache entries from the on-disk format to the in-memory format. -// The signature for this extension is { 'I', 'E', 'O', 'T' }. -// -// The extension consists of: -// -// - 32-bit version (currently 1) -// -// - A number of index offset entries each consisting of: -// -// - 32-bit offset from the beginning of the file to the first cache entry -// in this block of entries. -// -// - 32-bit count of cache entries in this block -package index diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go deleted file mode 100644 index 00d4e7a3178..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go +++ /dev/null @@ -1,150 +0,0 @@ -package index - -import ( - "bytes" - "crypto/sha1" - "errors" - "hash" - "io" - "sort" - "time" - - "github.com/go-git/go-git/v5/utils/binary" -) - -var ( - // EncodeVersionSupported is the range of supported index versions - EncodeVersionSupported uint32 = 2 - - // ErrInvalidTimestamp is returned by Encode if an Index contains an Entry - // with negative timestamp values - ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") -) - -// An Encoder writes an Index to an output stream. -type Encoder struct { - w io.Writer - hash hash.Hash -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - h := sha1.New() - mw := io.MultiWriter(w, h) - return &Encoder{mw, h} -} - -// Encode writes the Index to the stream of the encoder.
-func (e *Encoder) Encode(idx *Index) error { - // TODO: support versions v3 and v4 - // TODO: support extensions - if idx.Version != EncodeVersionSupported { - return ErrUnsupportedVersion - } - - if err := e.encodeHeader(idx); err != nil { - return err - } - - if err := e.encodeEntries(idx); err != nil { - return err - } - - return e.encodeFooter() -} - -func (e *Encoder) encodeHeader(idx *Index) error { - return binary.Write(e.w, - indexSignature, - idx.Version, - uint32(len(idx.Entries)), - ) -} - -func (e *Encoder) encodeEntries(idx *Index) error { - sort.Sort(byName(idx.Entries)) - - for _, entry := range idx.Entries { - if err := e.encodeEntry(entry); err != nil { - return err - } - - wrote := entryHeaderLength + len(entry.Name) - if err := e.padEntry(wrote); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeEntry(entry *Entry) error { - if entry.IntentToAdd || entry.SkipWorktree { - return ErrUnsupportedVersion - } - - sec, nsec, err := e.timeToUint32(&entry.CreatedAt) - if err != nil { - return err - } - - msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) - if err != nil { - return err - } - - flags := uint16(entry.Stage&0x3) << 12 - if l := len(entry.Name); l < nameMask { - flags |= uint16(l) - } else { - flags |= nameMask - } - - flow := []interface{}{ - sec, nsec, - msec, mnsec, - entry.Dev, - entry.Inode, - entry.Mode, - entry.UID, - entry.GID, - entry.Size, - entry.Hash[:], - flags, - } - - if err := binary.Write(e.w, flow...); err != nil { - return err - } - - return binary.Write(e.w, []byte(entry.Name)) -} - -func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { - if t.IsZero() { - return 0, 0, nil - } - - if t.Unix() < 0 || t.UnixNano() < 0 { - return 0, 0, ErrInvalidTimestamp - } - - return uint32(t.Unix()), uint32(t.Nanosecond()), nil -} - -func (e *Encoder) padEntry(wrote int) error { - padLen := 8 - wrote%8 - - _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) - return err -} - -func (e *Encoder) encodeFooter() error { - return binary.Write(e.w, e.hash.Sum(nil)) -} - -type byName []*Entry - -func (l byName) Len() int { return len(l) } -func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go deleted file mode 100644 index 649416a2b44..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go +++ /dev/null @@ -1,213 +0,0 @@ -package index - -import ( - "bytes" - "errors" - "fmt" - "path/filepath" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" -) - -var ( - // ErrUnsupportedVersion is returned by Decode when the index file version - // is not supported. - ErrUnsupportedVersion = errors.New("unsupported version") - // ErrEntryNotFound is returned by Index.Entry, if an entry is not found. 
- ErrEntryNotFound = errors.New("entry not found") - - indexSignature = []byte{'D', 'I', 'R', 'C'} - treeExtSignature = []byte{'T', 'R', 'E', 'E'} - resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'} - endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'} -) - -// Stage during merge -type Stage int - -const ( - // Merged is the default stage, fully merged - Merged Stage = 1 - // AncestorMode is the base revision - AncestorMode Stage = 1 - // OurMode is the first tree revision, ours - OurMode Stage = 2 - // TheirMode is the second tree revision, theirs - TheirMode Stage = 3 -) - -// Index contains the information about which objects are currently checked out -// in the worktree, having information about the working files. Changes in -// worktree are detected using this Index. The Index is also used during merges. -type Index struct { - // Version is index version - Version uint32 - // Entries collection of entries represented by this Index. The order of - // this collection is not guaranteed - Entries []*Entry - // Cache represents the 'Cached tree' extension - Cache *Tree - // ResolveUndo represents the 'Resolve undo' extension - ResolveUndo *ResolveUndo - // EndOfIndexEntry represents the 'End of Index Entry' extension - EndOfIndexEntry *EndOfIndexEntry -} - -// Add creates a new Entry and returns it. The caller should first check that -// another entry with the same path does not exist. -func (i *Index) Add(path string) *Entry { - e := &Entry{ - Name: filepath.ToSlash(path), - } - - i.Entries = append(i.Entries, e) - return e -} - -// Entry returns the entry that matches the given path, if any. -func (i *Index) Entry(path string) (*Entry, error) { - path = filepath.ToSlash(path) - for _, e := range i.Entries { - if e.Name == path { - return e, nil - } - } - - return nil, ErrEntryNotFound -} - -// Remove removes the entry that matches the given path and returns the -// deleted entry. -func (i *Index) Remove(path string) (*Entry, error) { - path = filepath.ToSlash(path) - for index, e := range i.Entries { - if e.Name == path { - i.Entries = append(i.Entries[:index], i.Entries[index+1:]...) - return e, nil - } - } - - return nil, ErrEntryNotFound -} - -// Glob returns all entries matching pattern or nil if there is no matching -// entry. The syntax of patterns is the same as in filepath.Glob. -func (i *Index) Glob(pattern string) (matches []*Entry, err error) { - pattern = filepath.ToSlash(pattern) - for _, e := range i.Entries { - m, err := match(pattern, e.Name) - if err != nil { - return nil, err - } - - if m { - matches = append(matches, e) - } - } - - return -} - -// String is equivalent to `git ls-files --stage --debug` -func (i *Index) String() string { - buf := bytes.NewBuffer(nil) - for _, e := range i.Entries { - buf.WriteString(e.String()) - } - - return buf.String() -} - -// Entry represents a single file (or stage of a file) in the cache. An entry -// represents exactly one stage of a file. If a file path is unmerged then -// multiple Entry instances may appear for the same path name.
-type Entry struct { - // Hash is the SHA1 of the represented file - Hash plumbing.Hash - // Name is the Entry path name relative to top level directory - Name string - // CreatedAt time when the tracked path was created - CreatedAt time.Time - // ModifiedAt time when the tracked path was changed - ModifiedAt time.Time - // Dev and Inode of the tracked path - Dev, Inode uint32 - // Mode of the path - Mode filemode.FileMode - // UID and GID, userid and group id of the owner - UID, GID uint32 - // Size is the length in bytes for regular files - Size uint32 - // Stage on a merge defines what stage this entry represents - // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging - Stage Stage - // SkipWorktree used in sparse checkouts - // https://git-scm.com/docs/git-read-tree#_sparse_checkout - SkipWorktree bool - // IntentToAdd records only the fact that the path will be added later - // https://git-scm.com/docs/git-add ("git add -N") - IntentToAdd bool -} - -func (e Entry) String() string { - buf := bytes.NewBuffer(nil) - - fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name) - fmt.Fprintf(buf, " ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond()) - fmt.Fprintf(buf, " mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond()) - fmt.Fprintf(buf, " dev: %d\tino: %d\n", e.Dev, e.Inode) - fmt.Fprintf(buf, " uid: %d\tgid: %d\n", e.UID, e.GID) - fmt.Fprintf(buf, " size: %d\tflags: %x\n", e.Size, 0) - - return buf.String() -} - -// Tree contains pre-computed hashes for trees that can be derived from the -// index. It helps speed up tree object generation from index for a new commit. -type Tree struct { - Entries []TreeEntry -} - -// TreeEntry entry of a cached Tree -type TreeEntry struct { - // Path component (relative to its parent directory) - Path string - // Entries is the number of entries in the index that is covered by the tree - // this entry represents. - Entries int - // Trees is the number that represents the number of subtrees this tree has - Trees int - // Hash object name for the object that would result from writing this span - // of index as a tree. - Hash plumbing.Hash -} - -// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"): -// these higher stage entries are removed and a stage-0 entry with proper -// resolution is added. When these higher stage entries are removed, they are -// saved in the resolve undo extension. -type ResolveUndo struct { - Entries []ResolveUndoEntry -} - -// ResolveUndoEntry contains the information about a conflict when it is resolved -type ResolveUndoEntry struct { - Path string - Stages map[Stage]plumbing.Hash -} - -// EndOfIndexEntry is the 'End of Index Entry' (EOIE) extension, used to locate -// the end of the variable length index entries and the beginning of the -// extensions. Code can take advantage of this to quickly locate the index -// extensions without having to parse through all of the index entries. -// -// Because it must be able to be loaded before the variable length cache -// entries and other index extensions, this extension must be written last. -type EndOfIndexEntry struct { - // Offset to the end of the index entries - Offset uint32 - // Hash is a SHA-1 over the extension types and their sizes (but not - // their contents).
- Hash plumbing.Hash -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go deleted file mode 100644 index 2891d7d34cc..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go +++ /dev/null @@ -1,186 +0,0 @@ -package index - -import ( - "path/filepath" - "runtime" - "unicode/utf8" -) - -// match is filepath.Match with support to match fullpath and not only filenames -// code from: -// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 -func match(pattern, name string) (matched bool, err error) { -Pattern: - for len(pattern) > 0 { - var star bool - var chunk string - star, chunk, pattern = scanChunk(pattern) - - // Look for match at current position. - t, ok, err := matchChunk(chunk, name) - // if we're the last chunk, make sure we've exhausted the name - // otherwise we'll give a false result even if we could still match - // using the star - if ok && (len(t) == 0 || len(pattern) > 0) { - name = t - continue - } - if err != nil { - return false, err - } - if star { - // Look for match skipping i+1 bytes. - // Cannot skip /. - for i := 0; i < len(name); i++ { - t, ok, err := matchChunk(chunk, name[i+1:]) - if ok { - // if we're the last chunk, make sure we exhausted the name - if len(pattern) == 0 && len(t) > 0 { - continue - } - name = t - continue Pattern - } - if err != nil { - return false, err - } - } - } - return false, nil - } - return len(name) == 0, nil -} - -// scanChunk gets the next segment of pattern, which is a non-star string -// possibly preceded by a star. -func scanChunk(pattern string) (star bool, chunk, rest string) { - for len(pattern) > 0 && pattern[0] == '*' { - pattern = pattern[1:] - star = true - } - inrange := false - var i int -Scan: - for i = 0; i < len(pattern); i++ { - switch pattern[i] { - case '\\': - if runtime.GOOS != "windows" { - // error check handled in matchChunk: bad pattern. - if i+1 < len(pattern) { - i++ - } - } - case '[': - inrange = true - case ']': - inrange = false - case '*': - if !inrange { - break Scan - } - } - } - return star, pattern[0:i], pattern[i:] -} - -// matchChunk checks whether chunk matches the beginning of s. -// If so, it returns the remainder of s (after the match). -// Chunk is all single-character operators: literals, char classes, and ?. -func matchChunk(chunk, s string) (rest string, ok bool, err error) { - for len(chunk) > 0 { - if len(s) == 0 { - return - } - switch chunk[0] { - case '[': - // character class - r, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - // We can't end right after '[', we're expecting at least - // a closing bracket and possibly a caret. 
- if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - // possibly negated - negated := chunk[0] == '^' - if negated { - chunk = chunk[1:] - } - // parse all ranges - match := false - nrange := 0 - for { - if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { - chunk = chunk[1:] - break - } - var lo, hi rune - if lo, chunk, err = getEsc(chunk); err != nil { - return - } - hi = lo - if chunk[0] == '-' { - if hi, chunk, err = getEsc(chunk[1:]); err != nil { - return - } - } - if lo <= r && r <= hi { - match = true - } - nrange++ - } - if match == negated { - return - } - - case '?': - _, n := utf8.DecodeRuneInString(s) - s = s[n:] - chunk = chunk[1:] - - case '\\': - if runtime.GOOS != "windows" { - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - } - fallthrough - - default: - if chunk[0] != s[0] { - return - } - s = s[1:] - chunk = chunk[1:] - } - } - return s, true, nil -} - -// getEsc gets a possibly-escaped character from chunk, for a character class. -func getEsc(chunk string) (r rune, nchunk string, err error) { - if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { - err = filepath.ErrBadPattern - return - } - if chunk[0] == '\\' && runtime.GOOS != "windows" { - chunk = chunk[1:] - if len(chunk) == 0 { - err = filepath.ErrBadPattern - return - } - } - r, n := utf8.DecodeRuneInString(chunk) - if r == utf8.RuneError && n == 1 { - err = filepath.ErrBadPattern - } - nchunk = chunk[n:] - if len(nchunk) == 0 { - err = filepath.ErrBadPattern - } - return -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go deleted file mode 100644 index a7145160ae0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package objfile implements encoding and decoding of object files. -package objfile diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go deleted file mode 100644 index b6b2ca06dd9..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go +++ /dev/null @@ -1,114 +0,0 @@ -package objfile - -import ( - "compress/zlib" - "errors" - "io" - "strconv" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/packfile" -) - -var ( - ErrClosed = errors.New("objfile: already closed") - ErrHeader = errors.New("objfile: invalid header") - ErrNegativeSize = errors.New("objfile: negative object size") -) - -// Reader reads and decodes compressed objfile data from a provided io.Reader. -// Reader implements io.ReadCloser. Close should be called when finished with -// the Reader. Close will not close the underlying io.Reader. -type Reader struct { - multi io.Reader - zlib io.ReadCloser - hasher plumbing.Hasher -} - -// NewReader returns a new Reader reading from r. 
-func NewReader(r io.Reader) (*Reader, error) { - zlib, err := zlib.NewReader(r) - if err != nil { - return nil, packfile.ErrZLib.AddDetails(err.Error()) - } - - return &Reader{ - zlib: zlib, - }, nil -} - -// Header reads the type and the size of the object, and prepares the reader for reading -func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { - var raw []byte - raw, err = r.readUntil(' ') - if err != nil { - return - } - - t, err = plumbing.ParseObjectType(string(raw)) - if err != nil { - return - } - - raw, err = r.readUntil(0) - if err != nil { - return - } - - size, err = strconv.ParseInt(string(raw), 10, 64) - if err != nil { - err = ErrHeader - return - } - - defer r.prepareForRead(t, size) - return -} - -// readUntil reads one byte at a time from r until it encounters delim or an -// error. -func (r *Reader) readUntil(delim byte) ([]byte, error) { - var buf [1]byte - value := make([]byte, 0, 16) - for { - if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { - if err == io.EOF { - return nil, ErrHeader - } - return nil, err - } - - if buf[0] == delim { - return value, nil - } - - value = append(value, buf[0]) - } -} - -func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { - r.hasher = plumbing.NewHasher(t, size) - r.multi = io.TeeReader(r.zlib, r.hasher) -} - -// Read reads len(p) bytes into p from the object data stream. It returns -// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even -// if Read returns n < len(p), it may use all of p as scratch space during the -// call. -// -// If Read encounters the end of the data stream it will return err == io.EOF, -// either in the current call if n > 0 or in a subsequent call. -func (r *Reader) Read(p []byte) (n int, err error) { - return r.multi.Read(p) -} - -// Hash returns the hash of the object data stream that has been read so far. -func (r *Reader) Hash() plumbing.Hash { - return r.hasher.Sum() -} - -// Close releases any resources consumed by the Reader. Calling Close does not -// close the wrapped io.Reader originally passed to NewReader. -func (r *Reader) Close() error { - return r.zlib.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go deleted file mode 100644 index 2a96a4370bc..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go +++ /dev/null @@ -1,109 +0,0 @@ -package objfile - -import ( - "compress/zlib" - "errors" - "io" - "strconv" - - "github.com/go-git/go-git/v5/plumbing" -) - -var ( - ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") -) - -// Writer writes and encodes data in compressed objfile format to a provided -// io.Writer. Close should be called when finished with the Writer. Close will -// not close the underlying io.Writer. -type Writer struct { - raw io.Writer - zlib io.WriteCloser - hasher plumbing.Hasher - multi io.Writer - - closed bool - pending int64 // number of unwritten bytes -} - -// NewWriter returns a new Writer writing to w. -// -// The returned Writer implements io.WriteCloser. Close should be called when -// finished with the Writer. Close will not close the underlying io.Writer. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - raw: w, - zlib: zlib.NewWriter(w), - } -} - -// WriteHeader writes the type and the size and prepares to accept the object's -// contents.
If an invalid t is provided, plumbing.ErrInvalidType is returned. If a -// negative size is provided, ErrNegativeSize is returned. -func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { - if !t.Valid() { - return plumbing.ErrInvalidType - } - if size < 0 { - return ErrNegativeSize - } - - b := t.Bytes() - b = append(b, ' ') - b = append(b, []byte(strconv.FormatInt(size, 10))...) - b = append(b, 0) - - defer w.prepareForWrite(t, size) - _, err := w.zlib.Write(b) - - return err -} - -func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { - w.pending = size - - w.hasher = plumbing.NewHasher(t, size) - w.multi = io.MultiWriter(w.zlib, w.hasher) -} - -// Write writes the object's contents. Write returns the error ErrOverflow if -// more than size bytes are written after WriteHeader. -func (w *Writer) Write(p []byte) (n int, err error) { - if w.closed { - return 0, ErrClosed - } - - overwrite := false - if int64(len(p)) > w.pending { - p = p[0:w.pending] - overwrite = true - } - - n, err = w.multi.Write(p) - w.pending -= int64(n) - if err == nil && overwrite { - err = ErrOverflow - return - } - - return -} - -// Hash returns the hash of the object data stream that has been written so far. -// It can be called before or after Close. -func (w *Writer) Hash() plumbing.Hash { - return w.hasher.Sum() // Not yet closed, return hash of data written so far -} - -// Close releases any resources consumed by the Writer. -// -// Calling Close does not close the wrapped io.Writer originally passed to -// NewWriter. -func (w *Writer) Close() error { - if err := w.zlib.Close(); err != nil { - return err - } - - w.closed = true - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go deleted file mode 100644 index df423ad50c8..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go +++ /dev/null @@ -1,78 +0,0 @@ -package packfile - -import ( - "bytes" - "compress/zlib" - "io" - "sync" - - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var signature = []byte{'P', 'A', 'C', 'K'} - -const ( - // VersionSupported is the packfile version supported by this package - VersionSupported uint32 = 2 - - firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length - lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length - maskFirstLength = 15 // 0000 1111 - maskContinue = 0x80 // 1000 0000 - maskLength = uint8(127) // 0111 1111 - maskType = uint8(112) // 0111 0000 -) - -// UpdateObjectStorage updates the storer with the objects in the given -// packfile. -func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { - if pw, ok := s.(storer.PackfileWriter); ok { - return WritePackfileToObjectStorage(pw, packfile) - } - - p, err := NewParserWithStorage(NewScanner(packfile), s) - if err != nil { - return err - } - - _, err = p.Parse() - return err -} - -// WritePackfileToObjectStorage writes all the packfile objects into the given -// object storage. 
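(Aside: a standalone sketch of the variable-length entry header that the mask constants above define; firstLengthBits, lengthBits and the masks are copied as literals, and the 300-byte commit is a made-up example.)

```go
package main

import "fmt"

// encodeEntryHead mirrors the bit layout described by the constants above:
// the first byte carries a continuation bit (maskContinue, 0x80), a 3-bit
// object type and the low 4 bits of the size; each later byte carries
// 7 more size bits, least significant group first.
func encodeEntryHead(typ byte, size int64) []byte {
	out := []byte{}
	c := (int64(typ) << 4) | (size & 15) // maskFirstLength = 0000 1111
	size >>= 4                           // firstLengthBits
	for size > 0 {
		out = append(out, byte(c|0x80)) // maskContinue: another byte follows
		c = size & 127                  // maskLength = 0111 1111
		size >>= 7                      // lengthBits
	}
	return append(out, byte(c))
}

func main() {
	// A commit (type 1) of 300 bytes encodes as two bytes:
	//   0x9c = 1 001 1100  (continue, type 1, low size bits 1100)
	//   0x12 = 0 0010010   (high size bits 10010; 0b100101100 = 300)
	fmt.Printf("% x\n", encodeEntryHead(1, 300)) // 9c 12
}
```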
-func WritePackfileToObjectStorage( - sw storer.PackfileWriter, - packfile io.Reader, -) (err error) { - w, err := sw.PackfileWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - var n int64 - n, err = io.Copy(w, packfile) - if err == nil && n == 0 { - return ErrEmptyPackfile - } - - return err -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(nil) - }, -} - -var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} - -var zlibReaderPool = sync.Pool{ - New: func() interface{} { - r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) - return r - }, -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go deleted file mode 100644 index 07a61120e5a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go +++ /dev/null @@ -1,297 +0,0 @@ -package packfile - -const blksz = 16 -const maxChainLength = 64 - -// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current -// design. -type deltaIndex struct { - table []int - entries []int - mask int -} - -func (idx *deltaIndex) init(buf []byte) { - scanner := newDeltaIndexScanner(buf, len(buf)) - idx.mask = scanner.mask - idx.table = scanner.table - idx.entries = make([]int, countEntries(scanner)+1) - idx.copyEntries(scanner) -} - -// findMatch returns the offset of src where the block starting at tgtOffset -// is and the length of the match. A length of 0 means there was no match. A -// length of -1 means the src length is lower than the blksz and whatever -// other positive length is the length of the match in bytes. -func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { - if len(tgt) < tgtOffset+s { - return 0, len(tgt) - tgtOffset - } - - if len(src) < blksz { - return 0, -1 - } - - if len(tgt) >= tgtOffset+s && len(src) >= blksz { - h := hashBlock(tgt, tgtOffset) - tIdx := h & idx.mask - eIdx := idx.table[tIdx] - if eIdx != 0 { - srcOffset = idx.entries[eIdx] - } else { - return - } - - l = matchLength(src, tgt, tgtOffset, srcOffset) - } - - return -} - -func matchLength(src, tgt []byte, otgt, osrc int) (l int) { - lensrc := len(src) - lentgt := len(tgt) - for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { - l++ - osrc++ - otgt++ - } - return -} - -func countEntries(scan *deltaIndexScanner) (cnt int) { - // Figure out exactly how many entries we need. As we do the - // enumeration truncate any delta chains longer than what we - // are willing to scan during encode. This keeps the encode - // logic linear in the size of the input rather than quadratic. - for i := 0; i < len(scan.table); i++ { - h := scan.table[i] - if h == 0 { - continue - } - - size := 0 - for { - size++ - if size == maxChainLength { - scan.next[h] = 0 - break - } - h = scan.next[h] - - if h == 0 { - break - } - } - cnt += size - } - - return -} - -func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { - // Rebuild the entries list from the scanner, positioning all - // blocks in the same hash chain next to each other. We can - // then later discard the next list, along with the scanner. 
- // - next := 1 - for i := 0; i < len(idx.table); i++ { - h := idx.table[i] - if h == 0 { - continue - } - - idx.table[i] = next - for { - idx.entries[next] = scanner.entries[h] - next++ - h = scanner.next[h] - - if h == 0 { - break - } - } - } -} - -type deltaIndexScanner struct { - table []int - entries []int - next []int - mask int - count int -} - -func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { - size -= size % blksz - worstCaseBlockCnt := size / blksz - if worstCaseBlockCnt < 1 { - return new(deltaIndexScanner) - } - - tableSize := tableSize(worstCaseBlockCnt) - scanner := &deltaIndexScanner{ - table: make([]int, tableSize), - mask: tableSize - 1, - entries: make([]int, worstCaseBlockCnt+1), - next: make([]int, worstCaseBlockCnt+1), - } - - scanner.scan(buf, size) - return scanner -} - -// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries -// instead of the entries and the key, so we avoid operations to retrieve the offset later, as -// we don't use the key. -// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java -func (s *deltaIndexScanner) scan(buf []byte, end int) { - lastHash := 0 - ptr := end - blksz - - for { - key := hashBlock(buf, ptr) - tIdx := key & s.mask - head := s.table[tIdx] - if head != 0 && lastHash == key { - s.entries[head] = ptr - } else { - s.count++ - eIdx := s.count - s.entries[eIdx] = ptr - s.next[eIdx] = head - s.table[tIdx] = eIdx - } - - lastHash = key - ptr -= blksz - - if 0 > ptr { - break - } - } -} - -func tableSize(worstCaseBlockCnt int) int { - shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) - sz := 1 << uint(shift-1) - if sz < worstCaseBlockCnt { - sz <<= 1 - } - return sz -} - -// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future -func leadingZeros(x uint32) (n int) { - if x >= 1<<16 { - x >>= 16 - n = 16 - } - if x >= 1<<8 { - x >>= 8 - n += 8 - } - n += int(len8tab[x]) - return 32 - n -} - -var len8tab = [256]uint8{ - 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, - 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 
0x08, - 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, -} - -func hashBlock(raw []byte, ptr int) int { - // The first 4 steps collapse out into a 4 byte big-endian decode, - // with a larger right shift as we combined shift lefts together. - // - hash := ((uint32(raw[ptr]) & 0xff) << 24) | - ((uint32(raw[ptr+1]) & 0xff) << 16) | - ((uint32(raw[ptr+2]) & 0xff) << 8) | - (uint32(raw[ptr+3]) & 0xff) - hash ^= T[hash>>31] - - hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] - - hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] - - hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] - hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] - - return int(hash) -} - -var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, - 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, - 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, - 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, - 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, - 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, - 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, - 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, - 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, - 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, - 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, - 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, - 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, - 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, - 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, - 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, - 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, - 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, - 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, - 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, - 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, - 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, - 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, - 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, - 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, - 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, - 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, - 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, - 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, - 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, - 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, - 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, - 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, - 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, - 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, - 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, - 0x20067785, 
0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46, - 0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e, - 0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62, - 0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c, - 0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93, - 0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d, - 0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680, - 0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8, - 0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071, - 0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657, - 0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b, - 0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965, - 0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b, - 0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5, - 0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69, - 0xe4fe0d44, 0x4d736b1e, 0x99b5d833, -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go deleted file mode 100644 index 4b60ff39470..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go +++ /dev/null @@ -1,369 +0,0 @@ -package packfile - -import ( - "sort" - "sync" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -const ( - // deltas based on deltas, how many steps we can do. - // 50 is the default value used in JGit - maxDepth = int64(50) -) - -// applyDelta is the set of object types that we should apply deltas -var applyDelta = map[plumbing.ObjectType]bool{ - plumbing.BlobObject: true, - plumbing.TreeObject: true, -} - -type deltaSelector struct { - storer storer.EncodedObjectStorer -} - -func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { - return &deltaSelector{s} -} - -// ObjectsToPack creates a list of ObjectToPack from the hashes -// provided, creating deltas if it's suitable, using an specific -// internal logic. `packWindow` specifies the size of the sliding -// window used to compare objects for delta compression; 0 turns off -// delta compression entirely. 
-func (dw *deltaSelector) ObjectsToPack( - hashes []plumbing.Hash, - packWindow uint, -) ([]*ObjectToPack, error) { - otp, err := dw.objectsToPack(hashes, packWindow) - if err != nil { - return nil, err - } - - if packWindow == 0 { - return otp, nil - } - - dw.sort(otp) - - var objectGroups [][]*ObjectToPack - var prev *ObjectToPack - i := -1 - for _, obj := range otp { - if prev == nil || prev.Type() != obj.Type() { - objectGroups = append(objectGroups, []*ObjectToPack{obj}) - i++ - prev = obj - } else { - objectGroups[i] = append(objectGroups[i], obj) - } - } - - var wg sync.WaitGroup - var once sync.Once - for _, objs := range objectGroups { - objs := objs - wg.Add(1) - go func() { - if walkErr := dw.walk(objs, packWindow); walkErr != nil { - once.Do(func() { - err = walkErr - }) - } - wg.Done() - }() - } - wg.Wait() - - if err != nil { - return nil, err - } - - return otp, nil -} - -func (dw *deltaSelector) objectsToPack( - hashes []plumbing.Hash, - packWindow uint, -) ([]*ObjectToPack, error) { - var objectsToPack []*ObjectToPack - for _, h := range hashes { - var o plumbing.EncodedObject - var err error - if packWindow == 0 { - o, err = dw.encodedObject(h) - } else { - o, err = dw.encodedDeltaObject(h) - } - if err != nil { - return nil, err - } - - otp := newObjectToPack(o) - if _, ok := o.(plumbing.DeltaObject); ok { - otp.CleanOriginal() - } - - objectsToPack = append(objectsToPack, otp) - } - - if packWindow == 0 { - return objectsToPack, nil - } - - if err := dw.fixAndBreakChains(objectsToPack); err != nil { - return nil, err - } - - return objectsToPack, nil -} - -func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { - edos, ok := dw.storer.(storer.DeltaObjectStorer) - if !ok { - return dw.encodedObject(h) - } - - return edos.DeltaObject(plumbing.AnyObject, h) -} - -func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { - return dw.storer.EncodedObject(plumbing.AnyObject, h) -} - -func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { - m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) - for _, otp := range objectsToPack { - m[otp.Hash()] = otp - } - - for _, otp := range objectsToPack { - if err := dw.fixAndBreakChainsOne(m, otp); err != nil { - return err - } - } - - return nil -} - -func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { - if !otp.Object.Type().IsDelta() { - return nil - } - - // Initial ObjectToPack instances might have a delta assigned to Object - // but no actual base initially. Once Base is assigned to a delta, it means - // we already fixed it. - if otp.Base != nil { - return nil - } - - do, ok := otp.Object.(plumbing.DeltaObject) - if !ok { - // if this is not a DeltaObject, then we cannot retrieve its base, - // so we have to break the delta chain here. - return dw.undeltify(otp) - } - - base, ok := objectsToPack[do.BaseHash()] - if !ok { - // The base of the delta is not in our list of objects to pack, so - // we break the chain. 
- return dw.undeltify(otp) - } - - if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { - return err - } - - otp.SetDelta(base, otp.Object) - return nil -} - -func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { - if otp.Original != nil { - return nil - } - - if !otp.Object.Type().IsDelta() { - return nil - } - - obj, err := dw.encodedObject(otp.Hash()) - if err != nil { - return err - } - - otp.SetOriginal(obj) - - return nil -} - -// undeltify undeltifies an *ObjectToPack by retrieving the original object from -// the storer and resetting it. -func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { - if err := dw.restoreOriginal(otp); err != nil { - return err - } - - otp.Object = otp.Original - otp.Depth = 0 - return nil -} - -func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { - sort.Sort(byTypeAndSize(objectsToPack)) -} - -func (dw *deltaSelector) walk( - objectsToPack []*ObjectToPack, - packWindow uint, -) error { - indexMap := make(map[plumbing.Hash]*deltaIndex) - for i := 0; i < len(objectsToPack); i++ { - // Clean up the index map and reconstructed delta objects for anything - // outside our pack window, to save memory. - if i > int(packWindow) { - obj := objectsToPack[i-int(packWindow)] - - delete(indexMap, obj.Hash()) - - if obj.IsDelta() { - obj.SaveOriginalMetadata() - obj.CleanOriginal() - } - } - - target := objectsToPack[i] - - // If we already have a delta, we don't try to find a new one for this - // object. This happens when a delta is set to be reused from an existing - // packfile. - if target.IsDelta() { - continue - } - - // We only want to create deltas from specific types. - if !applyDelta[target.Type()] { - continue - } - - for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { - base := objectsToPack[j] - // Objects must use only the same type as their delta base. - // Since objectsToPack is sorted by type and size, once we find - // a different type, we know we won't find more of them. - if base.Type() != target.Type() { - break - } - - if err := dw.tryToDeltify(indexMap, base, target); err != nil { - return err - } - } - } - - return nil -} - -func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { - // Original object might not be present if we're reusing a delta, so we - // ensure it is restored. - if err := dw.restoreOriginal(target); err != nil { - return err - } - - if err := dw.restoreOriginal(base); err != nil { - return err - } - - // If the sizes are radically different, this is a bad pairing. - if target.Size() < base.Size()>>4 { - return nil - } - - msz := dw.deltaSizeLimit( - target.Object.Size(), - base.Depth, - target.Depth, - target.IsDelta(), - ) - - // Nearly impossible to fit useful delta. - if msz <= 8 { - return nil - } - - // If we have to insert a lot to make this work, find another. 
- if base.Size()-target.Size() > msz { - return nil - } - - if _, ok := indexMap[base.Hash()]; !ok { - indexMap[base.Hash()] = new(deltaIndex) - } - - // Now we can generate the delta using originals - delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) - if err != nil { - return err - } - - // if delta better than target - if delta.Size() < msz { - target.SetDelta(base, delta) - } - - return nil -} - -func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, - targetDepth int, targetDelta bool) int64 { - if !targetDelta { - // Any delta should be no more than 50% of the original size - // (for text files deflate of whole form should shrink 50%). - n := targetSize >> 1 - - // Evenly distribute delta size limits over allowed depth. - // If src is non-delta (depth = 0), delta <= 50% of original. - // If src is almost at limit (9/10), delta <= 10% of original. - return n * (maxDepth - int64(baseDepth)) / maxDepth - } - - // With a delta base chosen any new delta must be "better". - // Retain the distribution described above. - d := int64(targetDepth) - n := targetSize - - // If target depth is bigger than maxDepth, this delta is not suitable to be used. - if d >= maxDepth { - return 0 - } - - // If src is whole (depth=0) and base is near limit (depth=9/10) - // any delta using src can be 10x larger and still be better. - // - // If src is near limit (depth=9/10) and base is whole (depth=0) - // a new delta dependent on src must be 1/10th the size. - return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) -} - -type byTypeAndSize []*ObjectToPack - -func (a byTypeAndSize) Len() int { return len(a) } - -func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -func (a byTypeAndSize) Less(i, j int) bool { - if a[i].Type() < a[j].Type() { - return false - } - - if a[i].Type() > a[j].Type() { - return true - } - - return a[i].Size() > a[j].Size() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go deleted file mode 100644 index 1951b34ef19..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go +++ /dev/null @@ -1,204 +0,0 @@ -package packfile - -import ( - "bytes" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and -// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js -// for more info - -const ( - // Standard chunk size used to generate fingerprints - s = 16 - - // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 - // Max size of a copy operation (64KB) - maxCopySize = 64 * 1024 -) - -// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, -// will be loaded into memory to be able to create the delta object. -// To generate target again, you will need the obtained object and "base" one. -// Error will be returned if base or target object cannot be read. 
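(Aside, for intuition on the deltaSizeLimit logic above: with maxDepth = 50 and a non-delta target of 1,000 bytes, a base at depth 0 permits a delta of up to 500 bytes, i.e. 50% of the original, while a base already at depth 45 permits only 500 * (50-45)/50 = 50 bytes; deep chains have to buy their way in with much smaller deltas.)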
-func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { - return getDelta(new(deltaIndex), base, target) -} - -func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) { - br, err := base.Reader() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(br, &err) - - tr, err := target.Reader() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(tr, &err) - - bb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(bb) - bb.Reset() - - _, err = bb.ReadFrom(br) - if err != nil { - return nil, err - } - - tb := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(tb) - tb.Reset() - - _, err = tb.ReadFrom(tr) - if err != nil { - return nil, err - } - - db := diffDelta(index, bb.Bytes(), tb.Bytes()) - delta := &plumbing.MemoryObject{} - _, err = delta.Write(db) - if err != nil { - return nil, err - } - - delta.SetSize(int64(len(db))) - delta.SetType(plumbing.OFSDeltaObject) - - return delta, nil -} - -// DiffDelta returns the delta that transforms src into tgt. -func DiffDelta(src, tgt []byte) []byte { - return diffDelta(new(deltaIndex), src, tgt) -} - -func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - buf.Write(deltaEncodeSize(len(src))) - buf.Write(deltaEncodeSize(len(tgt))) - - if len(index.entries) == 0 { - index.init(src) - } - - ibuf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(ibuf) - ibuf.Reset() - for i := 0; i < len(tgt); i++ { - offset, l := index.findMatch(src, tgt, i) - - if l == 0 { - // couldn't find a match, just write the current byte and continue - ibuf.WriteByte(tgt[i]) - } else if l < 0 { - // src is less than blksz, copy the rest of the target to avoid - // calls to findMatch - for ; i < len(tgt); i++ { - ibuf.WriteByte(tgt[i]) - } - } else if l < s { - // remaining target is less than blksz, copy what's left of it - // and avoid calls to findMatch - for j := i; j < i+l; j++ { - ibuf.WriteByte(tgt[j]) - } - i += l - 1 - } else { - encodeInsertOperation(ibuf, buf) - - rl := l - aOffset := offset - for rl > 0 { - if rl < maxCopySize { - buf.Write(encodeCopyOperation(aOffset, rl)) - break - } - - buf.Write(encodeCopyOperation(aOffset, maxCopySize)) - rl -= maxCopySize - aOffset += maxCopySize - } - - i += l - 1 - } - } - - encodeInsertOperation(ibuf, buf) - - // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it. - return append([]byte{}, buf.Bytes()...) 
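(Aside: DiffDelta above pairs with PatchDelta from patch_delta.go in the same package, a file outside this hunk; assuming that API, a round-trip sketch with arbitrary sample strings:)

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	src := []byte("the quick brown fox jumps over the lazy dog")
	tgt := []byte("the quick brown fox jumps over the lazy cat")

	// The delta stream holds both sizes as varints, then (roughly) copy
	// ops for the shared prefix and a literal insert for the new tail;
	// the exact ops depend on the 16-byte block index.
	delta := packfile.DiffDelta(src, tgt)

	out, err := packfile.PatchDelta(src, delta) // from patch_delta.go (assumed API)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, tgt)) // true
}
```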
-} - -func encodeInsertOperation(ibuf, buf *bytes.Buffer) { - if ibuf.Len() == 0 { - return - } - - b := ibuf.Bytes() - s := ibuf.Len() - o := 0 - for { - if s <= 127 { - break - } - buf.WriteByte(byte(127)) - buf.Write(b[o : o+127]) - s -= 127 - o += 127 - } - buf.WriteByte(byte(s)) - buf.Write(b[o : o+s]) - - ibuf.Reset() -} - -func deltaEncodeSize(size int) []byte { - var ret []byte - c := size & 0x7f - size >>= 7 - for { - if size == 0 { - break - } - - ret = append(ret, byte(c|0x80)) - c = size & 0x7f - size >>= 7 - } - ret = append(ret, byte(c)) - - return ret -} - -func encodeCopyOperation(offset, length int) []byte { - code := 0x80 - var opcodes []byte - - var i uint - for i = 0; i < 4; i++ { - f := 0xff << (i * 8) - if offset&f != 0 { - opcodes = append(opcodes, byte(offset&f>>(i*8))) - code |= 0x01 << i - } - } - - for i = 0; i < 3; i++ { - f := 0xff << (i * 8) - if length&f != 0 { - opcodes = append(opcodes, byte(length&f>>(i*8))) - code |= 0x10 << i - } - } - - return append([]byte{byte(code)}, opcodes...) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go deleted file mode 100644 index 2882a7f3782..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go +++ /dev/null @@ -1,39 +0,0 @@ -// Package packfile implements encoding and decoding of packfile format. -// -// == pack-*.pack files have the following format: -// -// - A header appears at the beginning and consists of the following: -// -// 4-byte signature: -// The signature is: {'P', 'A', 'C', 'K'} -// -// 4-byte version number (network byte order): -// GIT currently accepts version number 2 or 3 but -// generates version 2 only. -// -// 4-byte number of objects contained in the pack (network byte order) -// -// Observation: we cannot have more than 4G versions ;-) and -// more than 4G objects in a pack. -// -// - The header is followed by number of object entries, each of -// which looks like this: -// -// (undeltified representation) -// n-byte type and length (3-bit type, (n-1)*7+4-bit length) -// compressed data -// -// (deltified representation) -// n-byte type and length (3-bit type, (n-1)*7+4-bit length) -// 20-byte base object name -// compressed delta data -// -// Observation: length of each object is encoded in a variable -// length format and is not constrained to 32-bit or anything. -// -// - The trailer records 20-byte SHA1 checksum of all of the above. 
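(Aside: the 12-byte header just described is easy to write out by hand; a stdlib-only sketch with a made-up object count:)

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	buf.WriteString("PACK")                             // 4-byte signature
	_ = binary.Write(&buf, binary.BigEndian, uint32(2)) // version, network byte order
	_ = binary.Write(&buf, binary.BigEndian, uint32(3)) // object count (example: 3)
	fmt.Printf("% x\n", buf.Bytes())
	// Output: 50 41 43 4b 00 00 00 02 00 00 00 03
}
```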
-// -// -// Source: -// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt -package packfile diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go deleted file mode 100644 index 5501f8861cb..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go +++ /dev/null @@ -1,225 +0,0 @@ -package packfile - -import ( - "compress/zlib" - "crypto/sha1" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/binary" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// Encoder gets the data from the storage and write it into the writer in PACK -// format -type Encoder struct { - selector *deltaSelector - w *offsetWriter - zw *zlib.Writer - hasher plumbing.Hasher - - useRefDeltas bool -} - -// NewEncoder creates a new packfile encoder using a specific Writer and -// EncodedObjectStorer. By default deltas used to generate the packfile will be -// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. -func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { - h := plumbing.Hasher{ - Hash: sha1.New(), - } - mw := io.MultiWriter(w, h) - ow := newOffsetWriter(mw) - zw := zlib.NewWriter(mw) - return &Encoder{ - selector: newDeltaSelector(s), - w: ow, - zw: zw, - hasher: h, - useRefDeltas: useRefDeltas, - } -} - -// Encode creates a packfile containing all the objects referenced in -// hashes and writes it to the writer in the Encoder. `packWindow` -// specifies the size of the sliding window used to compare objects -// for delta compression; 0 turns off delta compression entirely. -func (e *Encoder) Encode( - hashes []plumbing.Hash, - packWindow uint, -) (plumbing.Hash, error) { - objects, err := e.selector.ObjectsToPack(hashes, packWindow) - if err != nil { - return plumbing.ZeroHash, err - } - - return e.encode(objects) -} - -func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { - if err := e.head(len(objects)); err != nil { - return plumbing.ZeroHash, err - } - - for _, o := range objects { - if err := e.entry(o); err != nil { - return plumbing.ZeroHash, err - } - } - - return e.footer() -} - -func (e *Encoder) head(numEntries int) error { - return binary.Write( - e.w, - signature, - int32(VersionSupported), - int32(numEntries), - ) -} - -func (e *Encoder) entry(o *ObjectToPack) (err error) { - if o.WantWrite() { - // A cycle exists in this delta chain. This should only occur if a - // selected object representation disappeared during writing - // (for example due to a concurrent repack) and a different base - // was chosen, forcing a cycle. Select something other than a - // delta, and write this object. 
- e.selector.restoreOriginal(o) - o.BackToOriginal() - } - - if o.IsWritten() { - return nil - } - - o.MarkWantWrite() - - if err := e.writeBaseIfDelta(o); err != nil { - return err - } - - // We need to check if we already write that object due a cyclic delta chain - if o.IsWritten() { - return nil - } - - o.Offset = e.w.Offset() - - if o.IsDelta() { - if err := e.writeDeltaHeader(o); err != nil { - return err - } - } else { - if err := e.entryHead(o.Type(), o.Size()); err != nil { - return err - } - } - - e.zw.Reset(e.w) - - defer ioutil.CheckClose(e.zw, &err) - - or, err := o.Object.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(or, &err) - - _, err = io.Copy(e.zw, or) - if err != nil { - return err - } - - return nil -} - -func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { - if o.IsDelta() && !o.Base.IsWritten() { - // We must write base first - return e.entry(o.Base) - } - - return nil -} - -func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { - // Write offset deltas by default - t := plumbing.OFSDeltaObject - if e.useRefDeltas { - t = plumbing.REFDeltaObject - } - - if err := e.entryHead(t, o.Object.Size()); err != nil { - return err - } - - if e.useRefDeltas { - return e.writeRefDeltaHeader(o.Base.Hash()) - } else { - return e.writeOfsDeltaHeader(o) - } -} - -func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { - return binary.Write(e.w, base) -} - -func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { - // for OFS_DELTA, offset of the base is interpreted as negative offset - // relative to the type-byte of the header of the ofs-delta entry. - relativeOffset := o.Offset - o.Base.Offset - if relativeOffset <= 0 { - return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) - } - - return binary.WriteVariableWidthInt(e.w, relativeOffset) -} - -func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { - t := int64(typeNum) - header := []byte{} - c := (t << firstLengthBits) | (size & maskFirstLength) - size >>= firstLengthBits - for { - if size == 0 { - break - } - header = append(header, byte(c|maskContinue)) - c = size & int64(maskLength) - size >>= lengthBits - } - - header = append(header, byte(c)) - _, err := e.w.Write(header) - - return err -} - -func (e *Encoder) footer() (plumbing.Hash, error) { - h := e.hasher.Sum() - return h, binary.Write(e.w, h) -} - -type offsetWriter struct { - w io.Writer - offset int64 -} - -func newOffsetWriter(w io.Writer) *offsetWriter { - return &offsetWriter{w: w} -} - -func (ow *offsetWriter) Write(p []byte) (n int, err error) { - n, err = ow.w.Write(p) - ow.offset += int64(n) - return n, err -} - -func (ow *offsetWriter) Offset() int64 { - return ow.offset -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go deleted file mode 100644 index c0b91633131..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go +++ /dev/null @@ -1,30 +0,0 @@ -package packfile - -import "fmt" - -// Error specifies errors returned during packfile parsing. -type Error struct { - reason, details string -} - -// NewError returns a new error. -func NewError(reason string) *Error { - return &Error{reason: reason} -} - -// Error returns a text representation of the error. 
-func (e *Error) Error() string { - if e.details == "" { - return e.reason - } - - return fmt.Sprintf("%s: %s", e.reason, e.details) -} - -// AddDetails adds details to an error, with additional text. -func (e *Error) AddDetails(format string, args ...interface{}) *Error { - return &Error{ - reason: e.reason, - details: fmt.Sprintf(format, args...), - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go deleted file mode 100644 index c5edaf52ee3..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go +++ /dev/null @@ -1,116 +0,0 @@ -package packfile - -import ( - "io" - - billy "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" -) - -// FSObject is an object from the packfile on the filesystem. -type FSObject struct { - hash plumbing.Hash - h *ObjectHeader - offset int64 - size int64 - typ plumbing.ObjectType - index idxfile.Index - fs billy.Filesystem - path string - cache cache.Object -} - -// NewFSObject creates a new filesystem object. -func NewFSObject( - hash plumbing.Hash, - finalType plumbing.ObjectType, - offset int64, - contentSize int64, - index idxfile.Index, - fs billy.Filesystem, - path string, - cache cache.Object, -) *FSObject { - return &FSObject{ - hash: hash, - offset: offset, - size: contentSize, - typ: finalType, - index: index, - fs: fs, - path: path, - cache: cache, - } -} - -// Reader implements the plumbing.EncodedObject interface. -func (o *FSObject) Reader() (io.ReadCloser, error) { - obj, ok := o.cache.Get(o.hash) - if ok && obj != o { - reader, err := obj.Reader() - if err != nil { - return nil, err - } - - return reader, nil - } - - f, err := o.fs.Open(o.path) - if err != nil { - return nil, err - } - - p := NewPackfileWithCache(o.index, nil, f, o.cache) - r, err := p.getObjectContent(o.offset) - if err != nil { - _ = f.Close() - return nil, err - } - - if err := f.Close(); err != nil { - return nil, err - } - - return r, nil -} - -// SetSize implements the plumbing.EncodedObject interface. This method -// is a noop. -func (o *FSObject) SetSize(int64) {} - -// SetType implements the plumbing.EncodedObject interface. This method is -// a noop. -func (o *FSObject) SetType(plumbing.ObjectType) {} - -// Hash implements the plumbing.EncodedObject interface. -func (o *FSObject) Hash() plumbing.Hash { return o.hash } - -// Size implements the plumbing.EncodedObject interface. -func (o *FSObject) Size() int64 { return o.size } - -// Type implements the plumbing.EncodedObject interface. -func (o *FSObject) Type() plumbing.ObjectType { - return o.typ -} - -// Writer implements the plumbing.EncodedObject interface. This method always -// returns a nil writer. 
-func (o *FSObject) Writer() (io.WriteCloser, error) { - return nil, nil -} - -type objectReader struct { - io.ReadCloser - f billy.File -} - -func (r *objectReader) Close() error { - if err := r.ReadCloser.Close(); err != nil { - _ = r.f.Close() - return err - } - - return r.f.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go deleted file mode 100644 index 8ce29ef8ba0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go +++ /dev/null @@ -1,164 +0,0 @@ -package packfile - -import ( - "github.com/go-git/go-git/v5/plumbing" -) - -// ObjectToPack is a representation of an object that is going to be into a -// pack file. -type ObjectToPack struct { - // The main object to pack, it could be any object, including deltas - Object plumbing.EncodedObject - // Base is the object that a delta is based on (it could be also another delta). - // If the main object is not a delta, Base will be null - Base *ObjectToPack - // Original is the object that we can generate applying the delta to - // Base, or the same object as Object in the case of a non-delta - // object. - Original plumbing.EncodedObject - // Depth is the amount of deltas needed to resolve to obtain Original - // (delta based on delta based on ...) - Depth int - - // offset in pack when object has been already written, or 0 if it - // has not been written yet - Offset int64 - - // Information from the original object - resolvedOriginal bool - originalType plumbing.ObjectType - originalSize int64 - originalHash plumbing.Hash -} - -// newObjectToPack creates a correct ObjectToPack based on a non-delta object -func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { - return &ObjectToPack{ - Object: o, - Original: o, - } -} - -// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on -// his base (could be another delta), the delta target (in this case called original), -// and the delta Object itself -func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { - return &ObjectToPack{ - Object: delta, - Base: base, - Original: original, - Depth: base.Depth + 1, - } -} - -// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one -func (o *ObjectToPack) BackToOriginal() { - if o.IsDelta() && o.Original != nil { - o.Object = o.Original - o.Base = nil - o.Depth = 0 - } -} - -// IsWritten returns if that ObjectToPack was -// already written into the packfile or not -func (o *ObjectToPack) IsWritten() bool { - return o.Offset > 1 -} - -// MarkWantWrite marks this ObjectToPack as WantWrite -// to avoid delta chain loops -func (o *ObjectToPack) MarkWantWrite() { - o.Offset = 1 -} - -// WantWrite checks if this ObjectToPack was marked as WantWrite before -func (o *ObjectToPack) WantWrite() bool { - return o.Offset == 1 -} - -// SetOriginal sets both Original and saves size, type and hash. 
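(Aside: the Offset field above doubles as a small state machine that is easy to miss: 0 means the object is not scheduled yet, 1 is the MarkWantWrite "write in progress" sentinel used to break delta cycles, and anything larger is the real pack offset. The sentinel is safe because no object can start before byte 12, immediately after the pack header, which is why IsWritten checks Offset > 1 rather than Offset != 0.)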
If object -// is nil Original is set but previous resolved values are kept -func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { - o.Original = obj - o.SaveOriginalMetadata() -} - -// SaveOriginalMetadata saves size, type and hash of Original object -func (o *ObjectToPack) SaveOriginalMetadata() { - if o.Original != nil { - o.originalSize = o.Original.Size() - o.originalType = o.Original.Type() - o.originalHash = o.Original.Hash() - o.resolvedOriginal = true - } -} - -// CleanOriginal sets Original to nil -func (o *ObjectToPack) CleanOriginal() { - o.Original = nil -} - -func (o *ObjectToPack) Type() plumbing.ObjectType { - if o.Original != nil { - return o.Original.Type() - } - - if o.resolvedOriginal { - return o.originalType - } - - if o.Base != nil { - return o.Base.Type() - } - - if o.Object != nil { - return o.Object.Type() - } - - panic("cannot get type") -} - -func (o *ObjectToPack) Hash() plumbing.Hash { - if o.Original != nil { - return o.Original.Hash() - } - - if o.resolvedOriginal { - return o.originalHash - } - - do, ok := o.Object.(plumbing.DeltaObject) - if ok { - return do.ActualHash() - } - - panic("cannot get hash") -} - -func (o *ObjectToPack) Size() int64 { - if o.Original != nil { - return o.Original.Size() - } - - if o.resolvedOriginal { - return o.originalSize - } - - do, ok := o.Object.(plumbing.DeltaObject) - if ok { - return do.ActualSize() - } - - panic("cannot get ObjectToPack size") -} - -func (o *ObjectToPack) IsDelta() bool { - return o.Base != nil -} - -func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { - o.Object = delta - o.Base = base - o.Depth = base.Depth + 1 -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go deleted file mode 100644 index ddd7f62fce4..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go +++ /dev/null @@ -1,565 +0,0 @@ -package packfile - -import ( - "bytes" - "io" - "os" - - billy "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - // ErrInvalidObject is returned by Decode when an invalid object is - // found in the packfile. - ErrInvalidObject = NewError("invalid git object") - // ErrZLib is returned by Decode when there was an error unzipping - // the packfile contents. - ErrZLib = NewError("zlib reading error") -) - -// When reading small objects from packfile it is beneficial to do so at -// once to exploit the buffered I/O. In many cases the objects are so small -// that they were already loaded to memory when the object header was -// loaded from the packfile. Wrapping in FSObject would cause this buffered -// data to be thrown away and then re-read later, with the additional -// seeking causing reloads from disk. Objects smaller than this threshold -// are now always read into memory and stored in cache instead of being -// wrapped in FSObject. -const smallObjectThreshold = 16 * 1024 - -// Packfile allows retrieving information from inside a packfile. 
-type Packfile struct { - idxfile.Index - fs billy.Filesystem - file billy.File - s *Scanner - deltaBaseCache cache.Object - offsetToType map[int64]plumbing.ObjectType -} - -// NewPackfileWithCache creates a new Packfile with the given object cache. -// If the filesystem is provided, the packfile will return FSObjects, otherwise -// it will return MemoryObjects. -func NewPackfileWithCache( - index idxfile.Index, - fs billy.Filesystem, - file billy.File, - cache cache.Object, -) *Packfile { - s := NewScanner(file) - return &Packfile{ - index, - fs, - file, - s, - cache, - make(map[int64]plumbing.ObjectType), - } -} - -// NewPackfile returns a packfile representation for the given packfile file -// and packfile idx. -// If the filesystem is provided, the packfile will return FSObjects, otherwise -// it will return MemoryObjects. -func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { - return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) -} - -// Get retrieves the encoded object in the packfile with the given hash. -func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { - offset, err := p.FindOffset(h) - if err != nil { - return nil, err - } - - return p.objectAtOffset(offset, h) -} - -// GetByOffset retrieves the encoded object from the packfile at the given -// offset. -func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { - hash, err := p.FindHash(o) - if err != nil { - return nil, err - } - - return p.objectAtOffset(o, hash) -} - -// GetSizeByOffset retrieves the size of the encoded object from the -// packfile with the given offset. -func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { - if _, err := p.s.SeekFromStart(o); err != nil { - if err == io.EOF || isInvalid(err) { - return 0, plumbing.ErrObjectNotFound - } - - return 0, err - } - - h, err := p.nextObjectHeader() - if err != nil { - return 0, err - } - return p.getObjectSize(h) -} - -func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { - h, err := p.s.SeekObjectHeader(offset) - p.s.pendingObject = nil - return h, err -} - -func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { - h, err := p.s.NextObjectHeader() - p.s.pendingObject = nil - return h, err -} - -func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 { - delta := buf.Bytes() - _, delta = decodeLEB128(delta) // skip src size - sz, _ := decodeLEB128(delta) - return int64(sz) -} - -func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - return h.Length, nil - case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - - if _, _, err := p.s.NextObject(buf); err != nil { - return 0, err - } - - return p.getDeltaObjectSize(buf), nil - default: - return 0, ErrInvalidObject.AddDetails("type %q", h.Type) - } -} - -func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - return h.Type, nil - case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: - var offset int64 - if h.Type == plumbing.REFDeltaObject { - offset, err = p.FindOffset(h.Reference) - if err != nil { - return - } - } else { - offset = h.OffsetReference - } - - if baseType, ok := p.offsetToType[offset]; ok { - typ = baseType - } 
else { - h, err = p.objectHeaderAtOffset(offset) - if err != nil { - return - } - - typ, err = p.getObjectType(h) - if err != nil { - return - } - } - default: - err = ErrInvalidObject.AddDetails("type %q", h.Type) - } - - p.offsetToType[h.Offset] = typ - - return -} - -func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { - if obj, ok := p.cacheGet(hash); ok { - return obj, nil - } - - h, err := p.objectHeaderAtOffset(offset) - if err != nil { - if err == io.EOF || isInvalid(err) { - return nil, plumbing.ErrObjectNotFound - } - return nil, err - } - - return p.getNextObject(h, hash) -} - -func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) { - var err error - - // If we have no filesystem, we will return a MemoryObject instead - // of an FSObject. - if p.fs == nil { - return p.getNextMemoryObject(h) - } - - // If the object is small enough then read it completely into memory now since - // it is already read from disk into buffer anyway. For delta objects we want - // to perform the optimization too, but we have to be careful about applying - // small deltas on big objects. - var size int64 - if h.Length <= smallObjectThreshold { - if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { - return p.getNextMemoryObject(h) - } - - // For delta objects we read the delta data and apply the small object - // optimization only if the expanded version of the object still meets - // the small object threshold condition. - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - if _, _, err := p.s.NextObject(buf); err != nil { - return nil, err - } - - size = p.getDeltaObjectSize(buf) - if size <= smallObjectThreshold { - var obj = new(plumbing.MemoryObject) - obj.SetSize(size) - if h.Type == plumbing.REFDeltaObject { - err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf) - } else { - err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf) - } - return obj, err - } - } else { - size, err = p.getObjectSize(h) - if err != nil { - return nil, err - } - } - - typ, err := p.getObjectType(h) - if err != nil { - return nil, err - } - - p.offsetToType[h.Offset] = typ - - return NewFSObject( - hash, - typ, - h.Offset, - size, - p.Index, - p.fs, - p.file.Name(), - p.deltaBaseCache, - ), nil -} - -func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { - h, err := p.objectHeaderAtOffset(offset) - if err != nil { - return nil, err - } - - // getObjectContent is called from FSObject, so we have to explicitly - // get memory object here to avoid recursive cycle - obj, err := p.getNextMemoryObject(h) - if err != nil { - return nil, err - } - - return obj.Reader() -} - -func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { - var obj = new(plumbing.MemoryObject) - obj.SetSize(h.Length) - obj.SetType(h.Type) - - var err error - switch h.Type { - case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: - err = p.fillRegularObjectContent(obj) - case plumbing.REFDeltaObject: - err = p.fillREFDeltaObjectContent(obj, h.Reference) - case plumbing.OFSDeltaObject: - err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) - default: - err = ErrInvalidObject.AddDetails("type %q", h.Type) - } - - if err != nil { - return nil, err - } - - p.offsetToType[h.Offset] = obj.Type() - - return obj, nil -} - -func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) 
(err error) { - w, err := obj.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - _, _, err = p.s.NextObject(w) - p.cachePut(obj) - - return err -} - -func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, _, err := p.s.NextObject(buf) - if err != nil { - return err - } - - return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) -} - -func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { - var err error - - base, ok := p.cacheGet(ref) - if !ok { - base, err = p.Get(ref) - if err != nil { - return err - } - } - - obj.SetType(base.Type()) - err = ApplyDelta(obj, base, buf.Bytes()) - p.cachePut(obj) - - return err -} - -func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, _, err := p.s.NextObject(buf) - if err != nil { - return err - } - - return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) -} - -func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { - hash, err := p.FindHash(offset) - if err != nil { - return err - } - - base, err := p.objectAtOffset(offset, hash) - if err != nil { - return err - } - - obj.SetType(base.Type()) - err = ApplyDelta(obj, base, buf.Bytes()) - p.cachePut(obj) - - return err -} - -func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { - if p.deltaBaseCache == nil { - return nil, false - } - - return p.deltaBaseCache.Get(h) -} - -func (p *Packfile) cachePut(obj plumbing.EncodedObject) { - if p.deltaBaseCache == nil { - return - } - - p.deltaBaseCache.Put(obj) -} - -// GetAll returns an iterator with all encoded objects in the packfile. -// The iterator returned is not thread-safe, it should be used in the same -// thread as the Packfile instance. -func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { - return p.GetByType(plumbing.AnyObject) -} - -// GetByType returns all the objects of the given type. -func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { - switch typ { - case plumbing.AnyObject, - plumbing.BlobObject, - plumbing.TreeObject, - plumbing.CommitObject, - plumbing.TagObject: - entries, err := p.EntriesByOffset() - if err != nil { - return nil, err - } - - return &objectIter{ - // Easiest way to provide an object decoder is just to pass a Packfile - // instance. To not mess with the seeks, it's a new instance with a - // different scanner but the same cache and offset to hash map for - // reusing as much cache as possible. - p: p, - iter: entries, - typ: typ, - }, nil - default: - return nil, plumbing.ErrInvalidType - } -} - -// ID returns the ID of the packfile, which is the checksum at the end of it. -func (p *Packfile) ID() (plumbing.Hash, error) { - prev, err := p.file.Seek(-20, io.SeekEnd) - if err != nil { - return plumbing.ZeroHash, err - } - - var hash plumbing.Hash - if _, err := io.ReadFull(p.file, hash[:]); err != nil { - return plumbing.ZeroHash, err - } - - if _, err := p.file.Seek(prev, io.SeekStart); err != nil { - return plumbing.ZeroHash, err - } - - return hash, nil -} - -// Scanner returns the packfile's Scanner -func (p *Packfile) Scanner() *Scanner { - return p.s -} - -// Close the packfile and its resources. 
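(Aside: a usage sketch of the accessors above. The repository path and pack name are hypothetical, and the idxfile and osfs APIs come from sibling packages outside this hunk.)

```go
package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	fs := osfs.New("/repo/.git/objects/pack") // hypothetical path
	idxFile, _ := fs.Open("pack-1234.idx")    // hypothetical pack name
	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
		panic(err)
	}

	packFile, _ := fs.Open("pack-1234.pack")
	p := packfile.NewPackfile(idx, fs, packFile)
	defer p.Close()

	// Iterate every object; resolution of deltas happens transparently.
	iter, _ := p.GetAll()
	_ = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash(), o.Type(), o.Size())
		return nil
	})
}
```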
-func (p *Packfile) Close() error { - closer, ok := p.file.(io.Closer) - if !ok { - return nil - } - - return closer.Close() -} - -type objectIter struct { - p *Packfile - typ plumbing.ObjectType - iter idxfile.EntryIter -} - -func (i *objectIter) Next() (plumbing.EncodedObject, error) { - for { - e, err := i.iter.Next() - if err != nil { - return nil, err - } - - if i.typ != plumbing.AnyObject { - if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok { - if typ != i.typ { - continue - } - } else if obj, ok := i.p.cacheGet(e.Hash); ok { - if obj.Type() != i.typ { - i.p.offsetToType[int64(e.Offset)] = obj.Type() - continue - } - return obj, nil - } else { - h, err := i.p.objectHeaderAtOffset(int64(e.Offset)) - if err != nil { - return nil, err - } - - if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject { - typ, err := i.p.getObjectType(h) - if err != nil { - return nil, err - } - if typ != i.typ { - i.p.offsetToType[int64(e.Offset)] = typ - continue - } - // getObjectType will seek in the file so we cannot use getNextObject safely - return i.p.objectAtOffset(int64(e.Offset), e.Hash) - } else { - if h.Type != i.typ { - i.p.offsetToType[int64(e.Offset)] = h.Type - continue - } - return i.p.getNextObject(h, e.Hash) - } - } - } - - obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash) - if err != nil { - return nil, err - } - - return obj, nil - } -} - -func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { - for { - o, err := i.Next() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - - if err := f(o); err != nil { - return err - } - } -} - -func (i *objectIter) Close() { - i.iter.Close() -} - -// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid -// error inside. It also checks for the windows error, which is different from -// os.ErrInvalid. -func isInvalid(err error) bool { - pe, ok := err.(*os.PathError) - if !ok { - return false - } - - errstr := pe.Err.Error() - return errstr == errInvalidUnix || errstr == errInvalidWindows -} - -// errInvalidWindows is the Windows equivalent to os.ErrInvalid -const errInvalidWindows = "The parameter is incorrect." - -var errInvalidUnix = os.ErrInvalid.Error() diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go deleted file mode 100644 index 4b5a5708cc2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go +++ /dev/null @@ -1,495 +0,0 @@ -package packfile - -import ( - "bytes" - "errors" - "io" - stdioutil "io/ioutil" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - // ErrReferenceDeltaNotFound is returned when the reference delta is not - // found. - ErrReferenceDeltaNotFound = errors.New("reference delta not found") - - // ErrNotSeekableSource is returned when the source for the parser is not - // seekable and a storage was not provided, so it can't be parsed. - ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") - - // ErrDeltaNotCached is returned when the delta could not be found in cache. - ErrDeltaNotCached = errors.New("delta could not be found in cache") -) - -// Observer interface is implemented by index encoders. -type Observer interface { - // OnHeader is called when a new packfile is opened. 
- OnHeader(count uint32) error - // OnInflatedObjectHeader is called for each object header read. - OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error - // OnInflatedObjectContent is called for each decoded object. - OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error - // OnFooter is called when decoding is done. - OnFooter(h plumbing.Hash) error -} - -// Parser decodes a packfile and calls any observer associated to it. Is used -// to generate indexes. -type Parser struct { - storage storer.EncodedObjectStorer - scanner *Scanner - count uint32 - oi []*objectInfo - oiByHash map[plumbing.Hash]*objectInfo - oiByOffset map[int64]*objectInfo - hashOffset map[plumbing.Hash]int64 - checksum plumbing.Hash - - cache *cache.BufferLRU - // delta content by offset, only used if source is not seekable - deltas map[int64][]byte - - ob []Observer -} - -// NewParser creates a new Parser. The Scanner source must be seekable. -// If it's not, NewParserWithStorage should be used instead. -func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { - return NewParserWithStorage(scanner, nil, ob...) -} - -// NewParserWithStorage creates a new Parser. The scanner source must either -// be seekable or a storage must be provided. -func NewParserWithStorage( - scanner *Scanner, - storage storer.EncodedObjectStorer, - ob ...Observer, -) (*Parser, error) { - if !scanner.IsSeekable && storage == nil { - return nil, ErrNotSeekableSource - } - - var deltas map[int64][]byte - if !scanner.IsSeekable { - deltas = make(map[int64][]byte) - } - - return &Parser{ - storage: storage, - scanner: scanner, - ob: ob, - count: 0, - cache: cache.NewBufferLRUDefault(), - deltas: deltas, - }, nil -} - -func (p *Parser) forEachObserver(f func(o Observer) error) error { - for _, o := range p.ob { - if err := f(o); err != nil { - return err - } - } - return nil -} - -func (p *Parser) onHeader(count uint32) error { - return p.forEachObserver(func(o Observer) error { - return o.OnHeader(count) - }) -} - -func (p *Parser) onInflatedObjectHeader( - t plumbing.ObjectType, - objSize int64, - pos int64, -) error { - return p.forEachObserver(func(o Observer) error { - return o.OnInflatedObjectHeader(t, objSize, pos) - }) -} - -func (p *Parser) onInflatedObjectContent( - h plumbing.Hash, - pos int64, - crc uint32, - content []byte, -) error { - return p.forEachObserver(func(o Observer) error { - return o.OnInflatedObjectContent(h, pos, crc, content) - }) -} - -func (p *Parser) onFooter(h plumbing.Hash) error { - return p.forEachObserver(func(o Observer) error { - return o.OnFooter(h) - }) -} - -// Parse start decoding phase of the packfile. 
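// Editorial sketch, not part of the vendored file: typical use of this
// parser, following the constructors above (the path is hypothetical):
//
//     f, err := os.Open("pack-1234.pack")
//     if err != nil { /* handle */ }
//     defer f.Close()
//     scanner := packfile.NewScanner(f) // an *os.File is seekable, so no storage is required
//     parser, err := packfile.NewParser(scanner /*, observers... */)
//     if err != nil { /* handle */ }
//     checksum, err := parser.Parse() // drives the observers and returns the pack checksum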
-func (p *Parser) Parse() (plumbing.Hash, error) { - if err := p.init(); err != nil { - return plumbing.ZeroHash, err - } - - if err := p.indexObjects(); err != nil { - return plumbing.ZeroHash, err - } - - var err error - p.checksum, err = p.scanner.Checksum() - if err != nil && err != io.EOF { - return plumbing.ZeroHash, err - } - - if err := p.resolveDeltas(); err != nil { - return plumbing.ZeroHash, err - } - - if err := p.onFooter(p.checksum); err != nil { - return plumbing.ZeroHash, err - } - - return p.checksum, nil -} - -func (p *Parser) init() error { - _, c, err := p.scanner.Header() - if err != nil { - return err - } - - if err := p.onHeader(c); err != nil { - return err - } - - p.count = c - p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) - p.oiByOffset = make(map[int64]*objectInfo, p.count) - p.oi = make([]*objectInfo, p.count) - - return nil -} - -func (p *Parser) indexObjects() error { - buf := new(bytes.Buffer) - - for i := uint32(0); i < p.count; i++ { - buf.Reset() - - oh, err := p.scanner.NextObjectHeader() - if err != nil { - return err - } - - delta := false - var ota *objectInfo - switch t := oh.Type; t { - case plumbing.OFSDeltaObject: - delta = true - - parent, ok := p.oiByOffset[oh.OffsetReference] - if !ok { - return plumbing.ErrObjectNotFound - } - - ota = newDeltaObject(oh.Offset, oh.Length, t, parent) - parent.Children = append(parent.Children, ota) - case plumbing.REFDeltaObject: - delta = true - parent, ok := p.oiByHash[oh.Reference] - if !ok { - // can't find referenced object in this pack file - // this must be a "thin" pack. - parent = &objectInfo{ //Placeholder parent - SHA1: oh.Reference, - ExternalRef: true, // mark as an external reference that must be resolved - Type: plumbing.AnyObject, - DiskType: plumbing.AnyObject, - } - p.oiByHash[oh.Reference] = parent - } - ota = newDeltaObject(oh.Offset, oh.Length, t, parent) - parent.Children = append(parent.Children, ota) - - default: - ota = newBaseObject(oh.Offset, oh.Length, t) - } - - _, crc, err := p.scanner.NextObject(buf) - if err != nil { - return err - } - - ota.Crc32 = crc - ota.Length = oh.Length - - data := buf.Bytes() - if !delta { - sha1, err := getSHA1(ota.Type, data) - if err != nil { - return err - } - - ota.SHA1 = sha1 - p.oiByHash[ota.SHA1] = ota - } - - if p.storage != nil && !delta { - obj := new(plumbing.MemoryObject) - obj.SetSize(oh.Length) - obj.SetType(oh.Type) - if _, err := obj.Write(data); err != nil { - return err - } - - if _, err := p.storage.SetEncodedObject(obj); err != nil { - return err - } - } - - if delta && !p.scanner.IsSeekable { - p.deltas[oh.Offset] = make([]byte, len(data)) - copy(p.deltas[oh.Offset], data) - } - - p.oiByOffset[oh.Offset] = ota - p.oi[i] = ota - } - - return nil -} - -func (p *Parser) resolveDeltas() error { - buf := &bytes.Buffer{} - for _, obj := range p.oi { - buf.Reset() - err := p.get(obj, buf) - if err != nil { - return err - } - content := buf.Bytes() - - if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { - return err - } - - if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { - return err - } - - if !obj.IsDelta() && len(obj.Children) > 0 { - for _, child := range obj.Children { - if err := p.resolveObject(stdioutil.Discard, child, content); err != nil { - return err - } - } - - // Remove the delta from the cache. 
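			// (Editorial note: for non-seekable sources the raw delta bytes were
			// stashed in p.deltas during indexObjects; once every child of this
			// base has been resolved they can never be needed again, so they are
			// dropped here to bound memory usage.)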
- if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { - delete(p.deltas, obj.Offset) - } - } - } - - return nil -} - -func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { - if !o.ExternalRef { // skip cache check for placeholder parents - b, ok := p.cache.Get(o.Offset) - if ok { - _, err := buf.Write(b) - return err - } - } - - // If it's not on the cache and is not a delta we can try to find it in the - // storage, if there's one. External refs must enter here. - if p.storage != nil && !o.Type.IsDelta() { - var e plumbing.EncodedObject - e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) - if err != nil { - return err - } - o.Type = e.Type() - - var r io.ReadCloser - r, err = e.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - _, err = buf.ReadFrom(io.LimitReader(r, e.Size())) - return err - } - - if o.ExternalRef { - // we were not able to resolve a ref in a thin pack - return ErrReferenceDeltaNotFound - } - - if o.DiskType.IsDelta() { - b := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(b) - b.Reset() - err := p.get(o.Parent, b) - if err != nil { - return err - } - base := b.Bytes() - - err = p.resolveObject(buf, o, base) - if err != nil { - return err - } - } else { - err := p.readData(buf, o) - if err != nil { - return err - } - } - - if len(o.Children) > 0 { - data := make([]byte, buf.Len()) - copy(data, buf.Bytes()) - p.cache.Put(o.Offset, data) - } - return nil -} - -func (p *Parser) resolveObject( - w io.Writer, - o *objectInfo, - base []byte, -) error { - if !o.DiskType.IsDelta() { - return nil - } - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - err := p.readData(buf, o) - if err != nil { - return err - } - data := buf.Bytes() - - data, err = applyPatchBase(o, data, base) - if err != nil { - return err - } - - if p.storage != nil { - obj := new(plumbing.MemoryObject) - obj.SetSize(o.Size()) - obj.SetType(o.Type) - if _, err := obj.Write(data); err != nil { - return err - } - - if _, err := p.storage.SetEncodedObject(obj); err != nil { - return err - } - } - _, err = w.Write(data) - return err -} - -func (p *Parser) readData(w io.Writer, o *objectInfo) error { - if !p.scanner.IsSeekable && o.DiskType.IsDelta() { - data, ok := p.deltas[o.Offset] - if !ok { - return ErrDeltaNotCached - } - _, err := w.Write(data) - return err - } - - if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { - return err - } - - if _, _, err := p.scanner.NextObject(w); err != nil { - return err - } - return nil -} - -func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { - patched, err := PatchDelta(base, data) - if err != nil { - return nil, err - } - - if ota.SHA1 == plumbing.ZeroHash { - ota.Type = ota.Parent.Type - sha1, err := getSHA1(ota.Type, patched) - if err != nil { - return nil, err - } - - ota.SHA1 = sha1 - ota.Length = int64(len(patched)) - } - - return patched, nil -} - -func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { - hasher := plumbing.NewHasher(t, int64(len(data))) - if _, err := hasher.Write(data); err != nil { - return plumbing.ZeroHash, err - } - - return hasher.Sum(), nil -} - -type objectInfo struct { - Offset int64 - Length int64 - Type plumbing.ObjectType - DiskType plumbing.ObjectType - ExternalRef bool // indicates this is an external reference in a thin pack file - - Crc32 uint32 - - Parent *objectInfo - Children []*objectInfo - SHA1 plumbing.Hash -} - -func newBaseObject(offset, length int64, t plumbing.ObjectType) 
*objectInfo { - return newDeltaObject(offset, length, t, nil) -} - -func newDeltaObject( - offset, length int64, - t plumbing.ObjectType, - parent *objectInfo, -) *objectInfo { - obj := &objectInfo{ - Offset: offset, - Length: length, - Type: t, - DiskType: t, - Crc32: 0, - Parent: parent, - } - - return obj -} - -func (o *objectInfo) IsDelta() bool { - return o.Type.IsDelta() -} - -func (o *objectInfo) Size() int64 { - return o.Length -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go deleted file mode 100644 index 9e90f30a72f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go +++ /dev/null @@ -1,252 +0,0 @@ -package packfile - -import ( - "bytes" - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h -// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, -// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js -// for details about the delta format. - -const deltaSizeMin = 4 - -// ApplyDelta writes to target the result of applying the modification deltas in delta to base. -func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { - r, err := base.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - w, err := target.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - _, err = buf.ReadFrom(r) - if err != nil { - return err - } - src := buf.Bytes() - - dst := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(dst) - dst.Reset() - err = patchDelta(dst, src, delta) - if err != nil { - return err - } - - target.SetSize(int64(dst.Len())) - - b := byteSlicePool.Get().([]byte) - _, err = io.CopyBuffer(w, dst, b) - byteSlicePool.Put(b) - return err -} - -var ( - ErrInvalidDelta = errors.New("invalid delta") - ErrDeltaCmd = errors.New("wrong delta command") -) - -// PatchDelta returns the result of applying the modification deltas in delta to src. -// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command -// is not copy from source or copy from delta (ErrDeltaCmd). 
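// Editorial worked example, not part of the vendored file: a delta stream
// begins with two LEB128-encoded sizes (source, then target) followed by
// copy commands. For src = "hello world" (11 bytes), the 4-byte delta below
// reads "source is 11 bytes, target is 5 bytes, copy 5 bytes from offset 0":
//
//     out, err := packfile.PatchDelta(
//         []byte("hello world"),
//         []byte{0x0b, 0x05, 0x90, 0x05}, // 0x90 = copy-from-src, one size byte follows
//     )
//     // out == []byte("hello"), err == nil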
-func PatchDelta(src, delta []byte) ([]byte, error) { - b := &bytes.Buffer{} - if err := patchDelta(b, src, delta); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -func patchDelta(dst *bytes.Buffer, src, delta []byte) error { - if len(delta) < deltaSizeMin { - return ErrInvalidDelta - } - - srcSz, delta := decodeLEB128(delta) - if srcSz != uint(len(src)) { - return ErrInvalidDelta - } - - targetSz, delta := decodeLEB128(delta) - remainingTargetSz := targetSz - - var cmd byte - dst.Grow(int(targetSz)) - for { - if len(delta) == 0 { - return ErrInvalidDelta - } - - cmd = delta[0] - delta = delta[1:] - if isCopyFromSrc(cmd) { - var offset, sz uint - var err error - offset, delta, err = decodeOffset(cmd, delta) - if err != nil { - return err - } - - sz, delta, err = decodeSize(cmd, delta) - if err != nil { - return err - } - - if invalidSize(sz, targetSz) || - invalidOffsetSize(offset, sz, srcSz) { - break - } - dst.Write(src[offset : offset+sz]) - remainingTargetSz -= sz - } else if isCopyFromDelta(cmd) { - sz := uint(cmd) // cmd is the size itself - if invalidSize(sz, targetSz) { - return ErrInvalidDelta - } - - if uint(len(delta)) < sz { - return ErrInvalidDelta - } - - dst.Write(delta[0:sz]) - remainingTargetSz -= sz - delta = delta[sz:] - } else { - return ErrDeltaCmd - } - - if remainingTargetSz <= 0 { - break - } - } - - return nil -} - -// Decodes a number encoded as an unsigned LEB128 at the start of some -// binary data and returns the decoded number and the rest of the -// stream. -// -// This must be called twice on the delta data buffer, first to get the -// expected source buffer size, and again to get the target buffer size. -func decodeLEB128(input []byte) (uint, []byte) { - var num, sz uint - var b byte - for { - b = input[sz] - num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks - sz++ - - if uint(b)&continuation == 0 || sz == uint(len(input)) { - break - } - } - - return num, input[sz:] -} - -const ( - payload = 0x7f // 0111 1111 - continuation = 0x80 // 1000 0000 -) - -func isCopyFromSrc(cmd byte) bool { - return (cmd & 0x80) != 0 -} - -func isCopyFromDelta(cmd byte) bool { - return (cmd&0x80) == 0 && cmd != 0 -} - -func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { - var offset uint - if (cmd & 0x01) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset = uint(delta[0]) - delta = delta[1:] - } - if (cmd & 0x02) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 8 - delta = delta[1:] - } - if (cmd & 0x04) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 16 - delta = delta[1:] - } - if (cmd & 0x08) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - offset |= uint(delta[0]) << 24 - delta = delta[1:] - } - - return offset, delta, nil -} - -func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { - var sz uint - if (cmd & 0x10) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz = uint(delta[0]) - delta = delta[1:] - } - if (cmd & 0x20) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz |= uint(delta[0]) << 8 - delta = delta[1:] - } - if (cmd & 0x40) != 0 { - if len(delta) == 0 { - return 0, nil, ErrInvalidDelta - } - sz |= uint(delta[0]) << 16 - delta = delta[1:] - } - if sz == 0 { - sz = 0x10000 - } - - return sz, delta, nil -} - -func invalidSize(sz, targetSz uint) bool { - return sz > targetSz -} - -func invalidOffsetSize(offset, sz, srcSz uint) bool { 
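	// (Editorial note: sumOverflows guards the unsigned addition below, since
	// a wrapped offset+sz could otherwise slip past the srcSz bounds check.)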
- return sumOverflows(offset, sz) || - offset+sz > srcSz -} - -func sumOverflows(a, b uint) bool { - return a+b < a -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go deleted file mode 100644 index 6e6a687886a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go +++ /dev/null @@ -1,466 +0,0 @@ -package packfile - -import ( - "bufio" - "bytes" - "compress/zlib" - "fmt" - "hash" - "hash/crc32" - "io" - stdioutil "io/ioutil" - "sync" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/binary" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - // ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile - ErrEmptyPackfile = NewError("empty packfile") - // ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect. - ErrBadSignature = NewError("malformed pack file signature") - // ErrUnsupportedVersion is returned by ReadHeader when the packfile version is - // different than VersionSupported. - ErrUnsupportedVersion = NewError("unsupported packfile version") - // ErrSeekNotSupported returned if seek is not support - ErrSeekNotSupported = NewError("not seek support") -) - -// ObjectHeader contains the information related to the object, this information -// is collected from the previous bytes to the content of the object. -type ObjectHeader struct { - Type plumbing.ObjectType - Offset int64 - Length int64 - Reference plumbing.Hash - OffsetReference int64 -} - -type Scanner struct { - r *scannerReader - crc hash.Hash32 - - // pendingObject is used to detect if an object has been read, or still - // is waiting to be read - pendingObject *ObjectHeader - version, objects uint32 - - // lsSeekable says if this scanner can do Seek or not, to have a Scanner - // seekable a r implementing io.Seeker is required - IsSeekable bool -} - -// NewScanner returns a new Scanner based on a reader, if the given reader -// implements io.ReadSeeker the Scanner will be also Seekable -func NewScanner(r io.Reader) *Scanner { - _, ok := r.(io.ReadSeeker) - - crc := crc32.NewIEEE() - return &Scanner{ - r: newScannerReader(r, crc), - crc: crc, - IsSeekable: ok, - } -} - -func (s *Scanner) Reset(r io.Reader) { - _, ok := r.(io.ReadSeeker) - - s.r.Reset(r) - s.crc.Reset() - s.IsSeekable = ok - s.pendingObject = nil - s.version = 0 - s.objects = 0 -} - -// Header reads the whole packfile header (signature, version and object count). -// It returns the version and the object count and performs checks on the -// validity of the signature and the version fields. -func (s *Scanner) Header() (version, objects uint32, err error) { - if s.version != 0 { - return s.version, s.objects, nil - } - - sig, err := s.readSignature() - if err != nil { - if err == io.EOF { - err = ErrEmptyPackfile - } - - return - } - - if !s.isValidSignature(sig) { - err = ErrBadSignature - return - } - - version, err = s.readVersion() - s.version = version - if err != nil { - return - } - - if !s.isSupportedVersion(version) { - err = ErrUnsupportedVersion.AddDetails("%d", version) - return - } - - objects, err = s.readCount() - s.objects = objects - return -} - -// readSignature reads an returns the signature field in the packfile. 
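// Editorial sketch, not part of the vendored file: driving the Scanner by
// hand. Header checks the "PACK" signature and the supported version before
// returning the object count:
//
//     s := packfile.NewScanner(f) // f is any io.Reader
//     version, objects, err := s.Header()
//     if err != nil { /* ErrEmptyPackfile, ErrBadSignature, ErrUnsupportedVersion, ... */ }
//     for i := uint32(0); i < objects; i++ {
//         h, err := s.NextObjectHeader()
//         // h.Type, h.Length and h.Offset describe the next entry
//     }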
-func (s *Scanner) readSignature() ([]byte, error) { - var sig = make([]byte, 4) - if _, err := io.ReadFull(s.r, sig); err != nil { - return []byte{}, err - } - - return sig, nil -} - -// isValidSignature returns if sig is a valid packfile signature. -func (s *Scanner) isValidSignature(sig []byte) bool { - return bytes.Equal(sig, signature) -} - -// readVersion reads and returns the version field of a packfile. -func (s *Scanner) readVersion() (uint32, error) { - return binary.ReadUint32(s.r) -} - -// isSupportedVersion returns whether version v is supported by the parser. -// The current supported version is VersionSupported, defined above. -func (s *Scanner) isSupportedVersion(v uint32) bool { - return v == VersionSupported -} - -// readCount reads and returns the count of objects field of a packfile. -func (s *Scanner) readCount() (uint32, error) { - return binary.ReadUint32(s.r) -} - -// SeekObjectHeader seeks to specified offset and returns the ObjectHeader -// for the next object in the reader -func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) { - // if seeking we assume that you are not interested in the header - if s.version == 0 { - s.version = VersionSupported - } - - if _, err := s.r.Seek(offset, io.SeekStart); err != nil { - return nil, err - } - - h, err := s.nextObjectHeader() - if err != nil { - return nil, err - } - - h.Offset = offset - return h, nil -} - -// NextObjectHeader returns the ObjectHeader for the next object in the reader -func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { - if err := s.doPending(); err != nil { - return nil, err - } - - offset, err := s.r.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - h, err := s.nextObjectHeader() - if err != nil { - return nil, err - } - - h.Offset = offset - return h, nil -} - -// nextObjectHeader returns the ObjectHeader for the next object in the reader -// without the Offset field -func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { - s.r.Flush() - s.crc.Reset() - - h := &ObjectHeader{} - s.pendingObject = h - - var err error - h.Offset, err = s.r.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - h.Type, h.Length, err = s.readObjectTypeAndLength() - if err != nil { - return nil, err - } - - switch h.Type { - case plumbing.OFSDeltaObject: - no, err := binary.ReadVariableWidthInt(s.r) - if err != nil { - return nil, err - } - - h.OffsetReference = h.Offset - no - case plumbing.REFDeltaObject: - var err error - h.Reference, err = binary.ReadHash(s.r) - if err != nil { - return nil, err - } - } - - return h, nil -} - -func (s *Scanner) doPending() error { - if s.version == 0 { - var err error - s.version, s.objects, err = s.Header() - if err != nil { - return err - } - } - - return s.discardObjectIfNeeded() -} - -func (s *Scanner) discardObjectIfNeeded() error { - if s.pendingObject == nil { - return nil - } - - h := s.pendingObject - n, _, err := s.NextObject(stdioutil.Discard) - if err != nil { - return err - } - - if n != h.Length { - return fmt.Errorf( - "error discarding object, discarded %d, expected %d", - n, h.Length, - ) - } - - return nil -} - -// ReadObjectTypeAndLength reads and returns the object type and the -// length field from an object entry in a packfile. 
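// Editorial worked example, not part of the vendored file: the entry header
// packs the object type into bits 4-6 of the first byte and the size into
// the low 4 bits plus 7 bits per continuation byte (MSB set = more bytes).
// So the two bytes
//
//     0x95 = 1 001 0101 -> continue, type 001 (commit), low size bits 0101
//     0x0a = 0 0001010  -> final byte, adds 0b0001010 << 4
//
// decode to (plumbing.CommitObject, 5 + (10 << 4)) = (commit, 165 bytes).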
-func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) { - t, c, err := s.readType() - if err != nil { - return t, 0, err - } - - l, err := s.readLength(c) - - return t, l, err -} - -func (s *Scanner) readType() (plumbing.ObjectType, byte, error) { - var c byte - var err error - if c, err = s.r.ReadByte(); err != nil { - return plumbing.ObjectType(0), 0, err - } - - typ := parseType(c) - - return typ, c, nil -} - -func parseType(b byte) plumbing.ObjectType { - return plumbing.ObjectType((b & maskType) >> firstLengthBits) -} - -// the length is codified in the last 4 bits of the first byte and in -// the last 7 bits of subsequent bytes. Last byte has a 0 MSB. -func (s *Scanner) readLength(first byte) (int64, error) { - length := int64(first & maskFirstLength) - - c := first - shift := firstLengthBits - var err error - for c&maskContinue > 0 { - if c, err = s.r.ReadByte(); err != nil { - return 0, err - } - - length += int64(c&maskLength) << shift - shift += lengthBits - } - - return length, nil -} - -// NextObject writes the content of the next object into the reader, returns -// the number of bytes written, the CRC32 of the content and an error, if any -func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) { - s.pendingObject = nil - written, err = s.copyObject(w) - - s.r.Flush() - crc32 = s.crc.Sum32() - s.crc.Reset() - - return -} - -// ReadRegularObject reads and write a non-deltified object -// from it zlib stream in an object entry in the packfile. -func (s *Scanner) copyObject(w io.Writer) (n int64, err error) { - zr := zlibReaderPool.Get().(io.ReadCloser) - defer zlibReaderPool.Put(zr) - - if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil { - return 0, fmt.Errorf("zlib reset error: %s", err) - } - - defer ioutil.CheckClose(zr, &err) - buf := byteSlicePool.Get().([]byte) - n, err = io.CopyBuffer(w, zr, buf) - byteSlicePool.Put(buf) - return -} - -var byteSlicePool = sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, -} - -// SeekFromStart sets a new offset from start, returns the old position before -// the change. -func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) { - // if seeking we assume that you are not interested in the header - if s.version == 0 { - s.version = VersionSupported - } - - previous, err = s.r.Seek(0, io.SeekCurrent) - if err != nil { - return -1, err - } - - _, err = s.r.Seek(offset, io.SeekStart) - return previous, err -} - -// Checksum returns the checksum of the packfile -func (s *Scanner) Checksum() (plumbing.Hash, error) { - err := s.discardObjectIfNeeded() - if err != nil { - return plumbing.ZeroHash, err - } - - return binary.ReadHash(s.r) -} - -// Close reads the reader until io.EOF -func (s *Scanner) Close() error { - buf := byteSlicePool.Get().([]byte) - _, err := io.CopyBuffer(stdioutil.Discard, s.r, buf) - byteSlicePool.Put(buf) - return err -} - -// Flush is a no-op (deprecated) -func (s *Scanner) Flush() error { - return nil -} - -// scannerReader has the following characteristics: -// - Provides an io.SeekReader impl for bufio.Reader, when the underlying -// reader supports it. -// - Keeps track of the current read position, for when the underlying reader -// isn't an io.SeekReader, but we still want to know the current offset. -// - Writes to the hash writer what it reads, with the aid of a smaller buffer. -// The buffer helps avoid a performance penality for performing small writes -// to the crc32 hash writer. 
-type scannerReader struct { - reader io.Reader - crc io.Writer - rbuf *bufio.Reader - wbuf *bufio.Writer - offset int64 -} - -func newScannerReader(r io.Reader, h io.Writer) *scannerReader { - sr := &scannerReader{ - rbuf: bufio.NewReader(nil), - wbuf: bufio.NewWriterSize(nil, 64), - crc: h, - } - sr.Reset(r) - - return sr -} - -func (r *scannerReader) Reset(reader io.Reader) { - r.reader = reader - r.rbuf.Reset(r.reader) - r.wbuf.Reset(r.crc) - - r.offset = 0 - if seeker, ok := r.reader.(io.ReadSeeker); ok { - r.offset, _ = seeker.Seek(0, io.SeekCurrent) - } -} - -func (r *scannerReader) Read(p []byte) (n int, err error) { - n, err = r.rbuf.Read(p) - - r.offset += int64(n) - if _, err := r.wbuf.Write(p[:n]); err != nil { - return n, err - } - return -} - -func (r *scannerReader) ReadByte() (b byte, err error) { - b, err = r.rbuf.ReadByte() - if err == nil { - r.offset++ - return b, r.wbuf.WriteByte(b) - } - return -} - -func (r *scannerReader) Flush() error { - return r.wbuf.Flush() -} - -// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker, -// then only whence=io.SeekCurrent is supported, any other operation fails. -func (r *scannerReader) Seek(offset int64, whence int) (int64, error) { - var err error - - if seeker, ok := r.reader.(io.ReadSeeker); !ok { - if whence != io.SeekCurrent || offset != 0 { - return -1, ErrSeekNotSupported - } - } else { - if whence == io.SeekCurrent && offset == 0 { - return r.offset, nil - } - - r.offset, err = seeker.Seek(offset, whence) - r.rbuf.Reset(r.reader) - } - - return r.offset, err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go deleted file mode 100644 index 6d409795b0e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package pktline implements reading payloads form pkt-lines and encoding -// pkt-lines from payloads. -package pktline - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// An Encoder writes pkt-lines to an output stream. -type Encoder struct { - w io.Writer -} - -const ( - // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. - MaxPayloadSize = 65516 - - // For compatibility with canonical Git implementation, accept longer pkt-lines - OversizePayloadMax = 65520 -) - -var ( - // FlushPkt are the contents of a flush-pkt pkt-line. - FlushPkt = []byte{'0', '0', '0', '0'} - // Flush is the payload to use with the Encode method to encode a flush-pkt. - Flush = []byte{} - // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. - FlushString = "" - // ErrPayloadTooLong is returned by the Encode methods when any of the - // provided payloads is bigger than MaxPayloadSize. - ErrPayloadTooLong = errors.New("payload is too long") -) - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - } -} - -// Flush encodes a flush-pkt to the output stream. -func (e *Encoder) Flush() error { - _, err := e.w.Write(FlushPkt) - return err -} - -// Encode encodes a pkt-line with the payload specified and write it to -// the output stream. If several payloads are specified, each of them -// will get streamed in their own pkt-lines. 
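// Editorial worked example, not part of the vendored file: a pkt-line is a
// 4-digit ASCII hex length (counting the 4 length bytes themselves) followed
// by the payload, so a 6-byte payload encodes with length 10 = "000a":
//
//     var buf bytes.Buffer
//     e := pktline.NewEncoder(&buf)
//     _ = e.EncodeString("hello\n") // buf now holds "000ahello\n"
//     _ = e.Flush()                 // appends the flush-pkt "0000"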
-func (e *Encoder) Encode(payloads ...[]byte) error { - for _, p := range payloads { - if err := e.encodeLine(p); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeLine(p []byte) error { - if len(p) > MaxPayloadSize { - return ErrPayloadTooLong - } - - if bytes.Equal(p, Flush) { - return e.Flush() - } - - n := len(p) + 4 - if _, err := e.w.Write(asciiHex16(n)); err != nil { - return err - } - _, err := e.w.Write(p) - return err -} - -// Returns the hexadecimal ascii representation of the 16 less -// significant bits of n. The length of the returned slice will always -// be 4. Example: if n is 1234 (0x4d2), the return value will be -// []byte{'0', '4', 'd', '2'}. -func asciiHex16(n int) []byte { - var ret [4]byte - ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) - ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) - ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) - ret[3] = byteToASCIIHex(byte(n & 0x000f)) - - return ret[:] -} - -// turns a byte into its hexadecimal ascii representation. Example: -// from 11 (0xb) to 'b'. -func byteToASCIIHex(n byte) byte { - if n < 10 { - return '0' + n - } - - return 'a' - 10 + n -} - -// EncodeString works similarly as Encode but payloads are specified as strings. -func (e *Encoder) EncodeString(payloads ...string) error { - for _, p := range payloads { - if err := e.Encode([]byte(p)); err != nil { - return err - } - } - - return nil -} - -// Encodef encodes a single pkt-line with the payload formatted as -// the format specifier. The rest of the arguments will be used in -// the format string. -func (e *Encoder) Encodef(format string, a ...interface{}) error { - return e.EncodeString( - fmt.Sprintf(format, a...), - ) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go deleted file mode 100644 index 99aab46e88d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go +++ /dev/null @@ -1,134 +0,0 @@ -package pktline - -import ( - "errors" - "io" -) - -const ( - lenSize = 4 -) - -// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. -var ErrInvalidPktLen = errors.New("invalid pkt-len found") - -// Scanner provides a convenient interface for reading the payloads of a -// series of pkt-lines. It takes an io.Reader providing the source, -// which then can be tokenized through repeated calls to the Scan -// method. -// -// After each Scan call, the Bytes method will return the payload of the -// corresponding pkt-line on a shared buffer, which will be 65516 bytes -// or smaller. Flush pkt-lines are represented by empty byte slices. -// -// Scanning stops at EOF or the first I/O error. -type Scanner struct { - r io.Reader // The reader provided by the client - err error // Sticky error - payload []byte // Last pkt-payload - len [lenSize]byte // Last pkt-len -} - -// NewScanner returns a new Scanner to read from r. -func NewScanner(r io.Reader) *Scanner { - return &Scanner{ - r: r, - } -} - -// Err returns the first error encountered by the Scanner. -func (s *Scanner) Err() error { - return s.err -} - -// Scan advances the Scanner to the next pkt-line, whose payload will -// then be available through the Bytes method. Scanning stops at EOF -// or the first I/O error. After Scan returns false, the Err method -// will return any error that occurred during scanning, except that if -// it was io.EOF, Err will return nil. 
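// Editorial sketch, not part of the vendored file: reading back a stream such
// as the one encoded above; flush-pkts surface as empty payloads:
//
//     s := pktline.NewScanner(&buf)
//     for s.Scan() {
//         fmt.Printf("%q\n", s.Bytes()) // "hello\n", then "" for the flush-pkt
//     }
//     if err := s.Err(); err != nil { /* handle */ }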
-func (s *Scanner) Scan() bool { - var l int - l, s.err = s.readPayloadLen() - if s.err == io.EOF { - s.err = nil - return false - } - if s.err != nil { - return false - } - - if cap(s.payload) < l { - s.payload = make([]byte, 0, l) - } - - if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { - return false - } - s.payload = s.payload[:l] - - return true -} - -// Bytes returns the most recent payload generated by a call to Scan. -// The underlying array may point to data that will be overwritten by a -// subsequent call to Scan. It does no allocation. -func (s *Scanner) Bytes() []byte { - return s.payload -} - -// Method readPayloadLen returns the payload length by reading the -// pkt-len and subtracting the pkt-len size. -func (s *Scanner) readPayloadLen() (int, error) { - if _, err := io.ReadFull(s.r, s.len[:]); err != nil { - if err == io.ErrUnexpectedEOF { - return 0, ErrInvalidPktLen - } - - return 0, err - } - - n, err := hexDecode(s.len) - if err != nil { - return 0, err - } - - switch { - case n == 0: - return 0, nil - case n <= lenSize: - return 0, ErrInvalidPktLen - case n > OversizePayloadMax+lenSize: - return 0, ErrInvalidPktLen - default: - return n - lenSize, nil - } -} - -// Turns the hexadecimal representation of a number in a byte slice into -// a number. This function substitute strconv.ParseUint(string(buf), 16, -// 16) and/or hex.Decode, to avoid generating new strings, thus helping the -// GC. -func hexDecode(buf [lenSize]byte) (int, error) { - var ret int - for i := 0; i < lenSize; i++ { - n, err := asciiHexToByte(buf[i]) - if err != nil { - return 0, ErrInvalidPktLen - } - ret = 16*ret + int(n) - } - return ret, nil -} - -// turns the hexadecimal ascii representation of a byte into its -// numerical value. Example: from 'b' to 11 (0xb). -func asciiHexToByte(b byte) (byte, error) { - switch { - case b >= '0' && b <= '9': - return b - '0', nil - case b >= 'a' && b <= 'f': - return b - 'a' + 10, nil - default: - return 0, ErrInvalidPktLen - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/hash.go deleted file mode 100644 index afc602a9ec8..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/hash.go +++ /dev/null @@ -1,83 +0,0 @@ -package plumbing - -import ( - "bytes" - "crypto/sha1" - "encoding/hex" - "hash" - "sort" - "strconv" -) - -// Hash SHA1 hashed content -type Hash [20]byte - -// ZeroHash is Hash with value zero -var ZeroHash Hash - -// ComputeHash compute the hash for a given ObjectType and content -func ComputeHash(t ObjectType, content []byte) Hash { - h := NewHasher(t, int64(len(content))) - h.Write(content) - return h.Sum() -} - -// NewHash return a new Hash from a hexadecimal hash representation -func NewHash(s string) Hash { - b, _ := hex.DecodeString(s) - - var h Hash - copy(h[:], b) - - return h -} - -func (h Hash) IsZero() bool { - var empty Hash - return h == empty -} - -func (h Hash) String() string { - return hex.EncodeToString(h[:]) -} - -type Hasher struct { - hash.Hash -} - -func NewHasher(t ObjectType, size int64) Hasher { - h := Hasher{sha1.New()} - h.Write(t.Bytes()) - h.Write([]byte(" ")) - h.Write([]byte(strconv.FormatInt(size, 10))) - h.Write([]byte{0}) - return h -} - -func (h Hasher) Sum() (hash Hash) { - copy(hash[:], h.Hash.Sum(nil)) - return -} - -// HashesSort sorts a slice of Hashes in increasing order. 
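// Editorial sketch, not part of the vendored file: ComputeHash above hashes
// the canonical "<type> <size>\x00" header before the content, which is why
// it agrees with git itself:
//
//     h := plumbing.ComputeHash(plumbing.BlobObject, []byte("hello\n"))
//     // h.String() == "ce013625030ba8dba906f756967f9e9ca394464a",
//     // the same ID `echo hello | git hash-object --stdin` prints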
-func HashesSort(a []Hash) { - sort.Sort(HashSlice(a)) -} - -// HashSlice attaches the methods of sort.Interface to []Hash, sorting in -// increasing order. -type HashSlice []Hash - -func (p HashSlice) Len() int { return len(p) } -func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } -func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// IsHash returns true if the given string is a valid hash. -func IsHash(s string) bool { - if len(s) != 40 { - return false - } - - _, err := hex.DecodeString(s) - return err == nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/memory.go deleted file mode 100644 index 21337cc0dd7..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/memory.go +++ /dev/null @@ -1,72 +0,0 @@ -package plumbing - -import ( - "bytes" - "io" -) - -// MemoryObject on memory Object implementation -type MemoryObject struct { - t ObjectType - h Hash - cont []byte - sz int64 -} - -// Hash returns the object Hash, the hash is calculated on-the-fly the first -// time it's called, in all subsequent calls the same Hash is returned even -// if the type or the content have changed. The Hash is only generated if the -// size of the content is exactly the object size. -func (o *MemoryObject) Hash() Hash { - if o.h == ZeroHash && int64(len(o.cont)) == o.sz { - o.h = ComputeHash(o.t, o.cont) - } - - return o.h -} - -// Type return the ObjectType -func (o *MemoryObject) Type() ObjectType { return o.t } - -// SetType sets the ObjectType -func (o *MemoryObject) SetType(t ObjectType) { o.t = t } - -// Size return the size of the object -func (o *MemoryObject) Size() int64 { return o.sz } - -// SetSize set the object size, a content of the given size should be written -// afterwards -func (o *MemoryObject) SetSize(s int64) { o.sz = s } - -// Reader returns an io.ReadCloser used to read the object's content. -// -// For a MemoryObject, this reader is seekable. -func (o *MemoryObject) Reader() (io.ReadCloser, error) { - return nopCloser{bytes.NewReader(o.cont)}, nil -} - -// Writer returns a ObjectWriter used to write the object's content. -func (o *MemoryObject) Writer() (io.WriteCloser, error) { - return o, nil -} - -func (o *MemoryObject) Write(p []byte) (n int, err error) { - o.cont = append(o.cont, p...) - o.sz = int64(len(o.cont)) - - return len(p), nil -} - -// Close releases any resources consumed by the object when it is acting as a -// ObjectWriter. -func (o *MemoryObject) Close() error { return nil } - -// nopCloser exposes the extra methods of bytes.Reader while nopping Close(). -// -// This allows clients to attempt seeking in a cached Blob's Reader. -type nopCloser struct { - *bytes.Reader -} - -// Close does nothing. -func (nc nopCloser) Close() error { return nil } diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object.go deleted file mode 100644 index 2655dee43e8..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object.go +++ /dev/null @@ -1,111 +0,0 @@ -// package plumbing implement the core interfaces and structs used by go-git -package plumbing - -import ( - "errors" - "io" -) - -var ( - ErrObjectNotFound = errors.New("object not found") - // ErrInvalidType is returned when an invalid object type is provided. 
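	// (Editorial aside, not part of the vendored file: these are sentinel
	// errors that callers compare against directly, e.g.
	//
	//	if _, err := s.EncodedObject(plumbing.AnyObject, h); err == plumbing.ErrObjectNotFound {
	//		// the storer s does not hold object h
	//	}
	//
	// where s is any storer.EncodedObjectStorer.)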
- ErrInvalidType = errors.New("invalid object type") -) - -// Object is a generic representation of any git object -type EncodedObject interface { - Hash() Hash - Type() ObjectType - SetType(ObjectType) - Size() int64 - SetSize(int64) - Reader() (io.ReadCloser, error) - Writer() (io.WriteCloser, error) -} - -// DeltaObject is an EncodedObject representing a delta. -type DeltaObject interface { - EncodedObject - // BaseHash returns the hash of the object used as base for this delta. - BaseHash() Hash - // ActualHash returns the hash of the object after applying the delta. - ActualHash() Hash - // Size returns the size of the object after applying the delta. - ActualSize() int64 -} - -// ObjectType internal object type -// Integer values from 0 to 7 map to those exposed by git. -// AnyObject is used to represent any from 0 to 7. -type ObjectType int8 - -const ( - InvalidObject ObjectType = 0 - CommitObject ObjectType = 1 - TreeObject ObjectType = 2 - BlobObject ObjectType = 3 - TagObject ObjectType = 4 - // 5 reserved for future expansion - OFSDeltaObject ObjectType = 6 - REFDeltaObject ObjectType = 7 - - AnyObject ObjectType = -127 -) - -func (t ObjectType) String() string { - switch t { - case CommitObject: - return "commit" - case TreeObject: - return "tree" - case BlobObject: - return "blob" - case TagObject: - return "tag" - case OFSDeltaObject: - return "ofs-delta" - case REFDeltaObject: - return "ref-delta" - case AnyObject: - return "any" - default: - return "unknown" - } -} - -func (t ObjectType) Bytes() []byte { - return []byte(t.String()) -} - -// Valid returns true if t is a valid ObjectType. -func (t ObjectType) Valid() bool { - return t >= CommitObject && t <= REFDeltaObject -} - -// IsDelta returns true for any ObjectTyoe that represents a delta (i.e. -// REFDeltaObject or OFSDeltaObject). -func (t ObjectType) IsDelta() bool { - return t == REFDeltaObject || t == OFSDeltaObject -} - -// ParseObjectType parses a string representation of ObjectType. It returns an -// error on parse failure. -func ParseObjectType(value string) (typ ObjectType, err error) { - switch value { - case "commit": - typ = CommitObject - case "tree": - typ = TreeObject - case "blob": - typ = BlobObject - case "tag": - typ = TagObject - case "ofs-delta": - typ = OFSDeltaObject - case "ref-delta": - typ = REFDeltaObject - default: - err = ErrInvalidType - } - return -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go deleted file mode 100644 index 8fb7576fa3f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go +++ /dev/null @@ -1,144 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// Blob is used to store arbitrary data - it is generally a file. -type Blob struct { - // Hash of the blob. - Hash plumbing.Hash - // Size of the (uncompressed) blob. - Size int64 - - obj plumbing.EncodedObject -} - -// GetBlob gets a blob from an object storer and decodes it. -func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) { - o, err := s.EncodedObject(plumbing.BlobObject, h) - if err != nil { - return nil, err - } - - return DecodeBlob(o) -} - -// DecodeObject decodes an encoded object into a *Blob. 
-func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) { - b := &Blob{} - if err := b.Decode(o); err != nil { - return nil, err - } - - return b, nil -} - -// ID returns the object ID of the blob. The returned value will always match -// the current value of Blob.Hash. -// -// ID is present to fulfill the Object interface. -func (b *Blob) ID() plumbing.Hash { - return b.Hash -} - -// Type returns the type of object. It always returns plumbing.BlobObject. -// -// Type is present to fulfill the Object interface. -func (b *Blob) Type() plumbing.ObjectType { - return plumbing.BlobObject -} - -// Decode transforms a plumbing.EncodedObject into a Blob struct. -func (b *Blob) Decode(o plumbing.EncodedObject) error { - if o.Type() != plumbing.BlobObject { - return ErrUnsupportedObject - } - - b.Hash = o.Hash() - b.Size = o.Size() - b.obj = o - - return nil -} - -// Encode transforms a Blob into a plumbing.EncodedObject. -func (b *Blob) Encode(o plumbing.EncodedObject) (err error) { - o.SetType(plumbing.BlobObject) - - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - r, err := b.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - _, err = io.Copy(w, r) - return err -} - -// Reader returns a reader allow the access to the content of the blob -func (b *Blob) Reader() (io.ReadCloser, error) { - return b.obj.Reader() -} - -// BlobIter provides an iterator for a set of blobs. -type BlobIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewBlobIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *BlobIter that iterates over all -// blobs contained in the storer.EncodedObjectIter. -// -// Any non-blob object returned by the storer.EncodedObjectIter is skipped. -func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter { - return &BlobIter{iter, s} -} - -// Next moves the iterator to the next blob and returns a pointer to it. If -// there are no more blobs, it returns io.EOF. -func (iter *BlobIter) Next() (*Blob, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - if obj.Type() != plumbing.BlobObject { - continue - } - - return DecodeBlob(obj) - } -} - -// ForEach call the cb function for each blob contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *BlobIter) ForEach(cb func(*Blob) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - if obj.Type() != plumbing.BlobObject { - return nil - } - - b, err := DecodeBlob(obj) - if err != nil { - return err - } - - return cb(b) - }) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go deleted file mode 100644 index 8b119bc9c94..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go +++ /dev/null @@ -1,159 +0,0 @@ -package object - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/go-git/go-git/v5/utils/merkletrie" -) - -// Change values represent a detected change between two git trees. For -// modifications, From is the original status of the node and To is its -// final status. For insertions, From is the zero value and for -// deletions To is the zero value. 
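// Editorial sketch, not part of the vendored file: Action below derives the
// kind of change purely from which side is the zero value, e.g.
//
//     c := &object.Change{To: toEntry} // From left zero; toEntry is a hypothetical ChangeEntry
//     a, _ := c.Action()               // a == merkletrie.Insert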
-type Change struct { - From ChangeEntry - To ChangeEntry -} - -var empty ChangeEntry - -// Action returns the kind of action represented by the change, an -// insertion, a deletion or a modification. -func (c *Change) Action() (merkletrie.Action, error) { - if c.From == empty && c.To == empty { - return merkletrie.Action(0), - fmt.Errorf("malformed change: empty from and to") - } - - if c.From == empty { - return merkletrie.Insert, nil - } - - if c.To == empty { - return merkletrie.Delete, nil - } - - return merkletrie.Modify, nil -} - -// Files return the files before and after a change. -// For insertions from will be nil. For deletions to will be nil. -func (c *Change) Files() (from, to *File, err error) { - action, err := c.Action() - if err != nil { - return - } - - if action == merkletrie.Insert || action == merkletrie.Modify { - to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry) - if !c.To.TreeEntry.Mode.IsFile() { - return nil, nil, nil - } - - if err != nil { - return - } - } - - if action == merkletrie.Delete || action == merkletrie.Modify { - from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry) - if !c.From.TreeEntry.Mode.IsFile() { - return nil, nil, nil - } - - if err != nil { - return - } - } - - return -} - -func (c *Change) String() string { - action, err := c.Action() - if err != nil { - return "malformed change" - } - - return fmt.Sprintf("", action, c.name()) -} - -// Patch returns a Patch with all the file changes in chunks. This -// representation can be used to create several diff outputs. -func (c *Change) Patch() (*Patch, error) { - return c.PatchContext(context.Background()) -} - -// Patch returns a Patch with all the file changes in chunks. This -// representation can be used to create several diff outputs. -// If context expires, an non-nil error will be returned -// Provided context must be non-nil -func (c *Change) PatchContext(ctx context.Context) (*Patch, error) { - return getPatchContext(ctx, "", c) -} - -func (c *Change) name() string { - if c.From != empty { - return c.From.Name - } - - return c.To.Name -} - -// ChangeEntry values represent a node that has suffered a change. -type ChangeEntry struct { - // Full path of the node using "/" as separator. - Name string - // Parent tree of the node that has changed. - Tree *Tree - // The entry of the node. - TreeEntry TreeEntry -} - -// Changes represents a collection of changes between two git trees. -// Implements sort.Interface lexicographically over the path of the -// changed files. -type Changes []*Change - -func (c Changes) Len() int { - return len(c) -} - -func (c Changes) Swap(i, j int) { - c[i], c[j] = c[j], c[i] -} - -func (c Changes) Less(i, j int) bool { - return strings.Compare(c[i].name(), c[j].name()) < 0 -} - -func (c Changes) String() string { - var buffer bytes.Buffer - buffer.WriteString("[") - comma := "" - for _, v := range c { - buffer.WriteString(comma) - buffer.WriteString(v.String()) - comma = ", " - } - buffer.WriteString("]") - - return buffer.String() -} - -// Patch returns a Patch with all the changes in chunks. This -// representation can be used to create several diff outputs. -func (c Changes) Patch() (*Patch, error) { - return c.PatchContext(context.Background()) -} - -// Patch returns a Patch with all the changes in chunks. This -// representation can be used to create several diff outputs. 
-// If context expires, an non-nil error will be returned -// Provided context must be non-nil -func (c Changes) PatchContext(ctx context.Context) (*Patch, error) { - return getPatchContext(ctx, "", c...) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go deleted file mode 100644 index f701188288f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go +++ /dev/null @@ -1,61 +0,0 @@ -package object - -import ( - "errors" - "fmt" - - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// The following functions transform changes types form the merkletrie -// package to changes types from this package. - -func newChange(c merkletrie.Change) (*Change, error) { - ret := &Change{} - - var err error - if ret.From, err = newChangeEntry(c.From); err != nil { - return nil, fmt.Errorf("From field: %s", err) - } - - if ret.To, err = newChangeEntry(c.To); err != nil { - return nil, fmt.Errorf("To field: %s", err) - } - - return ret, nil -} - -func newChangeEntry(p noder.Path) (ChangeEntry, error) { - if p == nil { - return empty, nil - } - - asTreeNoder, ok := p.Last().(*treeNoder) - if !ok { - return ChangeEntry{}, errors.New("cannot transform non-TreeNoders") - } - - return ChangeEntry{ - Name: p.String(), - Tree: asTreeNoder.parent, - TreeEntry: TreeEntry{ - Name: asTreeNoder.name, - Mode: asTreeNoder.mode, - Hash: asTreeNoder.hash, - }, - }, nil -} - -func newChanges(src merkletrie.Changes) (Changes, error) { - ret := make(Changes, len(src)) - var err error - for i, e := range src { - ret[i], err = newChange(e) - if err != nil { - return nil, fmt.Errorf("change #%d: %s", i, err) - } - } - - return ret, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go deleted file mode 100644 index 7a1b8e5ae0c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ /dev/null @@ -1,442 +0,0 @@ -package object - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "strings" - - "github.com/ProtonMail/go-crypto/openpgp" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - beginpgp string = "-----BEGIN PGP SIGNATURE-----" - endpgp string = "-----END PGP SIGNATURE-----" - headerpgp string = "gpgsig" -) - -// Hash represents the hash of an object -type Hash plumbing.Hash - -// Commit points to a single tree, marking it as what the project looked like -// at a certain point in time. It contains meta-information about that point -// in time, such as a timestamp, the author of the changes since the last -// commit, a pointer to the previous commit(s), etc. -// http://shafiulazam.com/gitbook/1_the_git_object_model.html -type Commit struct { - // Hash of the commit object. - Hash plumbing.Hash - // Author is the original author of the commit. - Author Signature - // Committer is the one performing the commit, might be different from - // Author. - Committer Signature - // PGPSignature is the PGP signature of the commit. - PGPSignature string - // Message is the commit message, contains arbitrary text. - Message string - // TreeHash is the hash of the root tree of the commit. 
- TreeHash plumbing.Hash - // ParentHashes are the hashes of the parent commits of the commit. - ParentHashes []plumbing.Hash - - s storer.EncodedObjectStorer -} - -// GetCommit gets a commit from an object storer and decodes it. -func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { - o, err := s.EncodedObject(plumbing.CommitObject, h) - if err != nil { - return nil, err - } - - return DecodeCommit(s, o) -} - -// DecodeCommit decodes an encoded object into a *Commit and associates it to -// the given object storer. -func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { - c := &Commit{s: s} - if err := c.Decode(o); err != nil { - return nil, err - } - - return c, nil -} - -// Tree returns the Tree from the commit. -func (c *Commit) Tree() (*Tree, error) { - return GetTree(c.s, c.TreeHash) -} - -// PatchContext returns the Patch between the actual commit and the provided one. -// Error will be return if context expires. Provided context must be non-nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) { - fromTree, err := c.Tree() - if err != nil { - return nil, err - } - - var toTree *Tree - if to != nil { - toTree, err = to.Tree() - if err != nil { - return nil, err - } - } - - return fromTree.PatchContext(ctx, toTree) -} - -// Patch returns the Patch between the actual commit and the provided one. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (c *Commit) Patch(to *Commit) (*Patch, error) { - return c.PatchContext(context.Background(), to) -} - -// Parents return a CommitIter to the parent Commits. -func (c *Commit) Parents() CommitIter { - return NewCommitIter(c.s, - storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes), - ) -} - -// NumParents returns the number of parents in a commit. -func (c *Commit) NumParents() int { - return len(c.ParentHashes) -} - -var ErrParentNotFound = errors.New("commit parent not found") - -// Parent returns the ith parent of a commit. -func (c *Commit) Parent(i int) (*Commit, error) { - if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 { - return nil, ErrParentNotFound - } - - return GetCommit(c.s, c.ParentHashes[i]) -} - -// File returns the file with the specified "path" in the commit and a -// nil error if the file exists. If the file does not exist, it returns -// a nil file and the ErrFileNotFound error. -func (c *Commit) File(path string) (*File, error) { - tree, err := c.Tree() - if err != nil { - return nil, err - } - - return tree.File(path) -} - -// Files returns a FileIter allowing to iterate over the Tree -func (c *Commit) Files() (*FileIter, error) { - tree, err := c.Tree() - if err != nil { - return nil, err - } - - return tree.Files(), nil -} - -// ID returns the object ID of the commit. The returned value will always match -// the current value of Commit.Hash. -// -// ID is present to fulfill the Object interface. -func (c *Commit) ID() plumbing.Hash { - return c.Hash -} - -// Type returns the type of object. It always returns plumbing.CommitObject. -// -// Type is present to fulfill the Object interface. -func (c *Commit) Type() plumbing.ObjectType { - return plumbing.CommitObject -} - -// Decode transforms a plumbing.EncodedObject into a Commit struct. 
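// Editorial worked example, not part of the vendored file: Decode below
// parses the canonical commit serialization, headers first, then a blank
// line and the free-form message (the hashes are placeholders):
//
//     tree 29ff16c9c14e2652b22f8b78bb08a5a07930c147
//     parent 206941306e8a8af65b66eaaaea388a7ae24d49a0
//     author A U Thor <author@example.com> 1234567890 +0000
//     committer A U Thor <author@example.com> 1234567890 +0000
//
//     Commit message starts here.
//
// A gpgsig header, when present, continues across space-indented lines.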
-func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.CommitObject { - return ErrUnsupportedObject - } - - c.Hash = o.Hash() - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - - var message bool - var pgpsig bool - var msgbuf bytes.Buffer - for { - line, err := r.ReadBytes('\n') - if err != nil && err != io.EOF { - return err - } - - if pgpsig { - if len(line) > 0 && line[0] == ' ' { - line = bytes.TrimLeft(line, " ") - c.PGPSignature += string(line) - continue - } else { - pgpsig = false - } - } - - if !message { - line = bytes.TrimSpace(line) - if len(line) == 0 { - message = true - continue - } - - split := bytes.SplitN(line, []byte{' '}, 2) - - var data []byte - if len(split) == 2 { - data = split[1] - } - - switch string(split[0]) { - case "tree": - c.TreeHash = plumbing.NewHash(string(data)) - case "parent": - c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data))) - case "author": - c.Author.Decode(data) - case "committer": - c.Committer.Decode(data) - case headerpgp: - c.PGPSignature += string(data) + "\n" - pgpsig = true - } - } else { - msgbuf.Write(line) - } - - if err == io.EOF { - break - } - } - c.Message = msgbuf.String() - return nil -} - -// Encode transforms a Commit into a plumbing.EncodedObject. -func (c *Commit) Encode(o plumbing.EncodedObject) error { - return c.encode(o, true) -} - -// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). -func (c *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error { - return c.encode(o, false) -} - -func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { - o.SetType(plumbing.CommitObject) - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - - if _, err = fmt.Fprintf(w, "tree %s\n", c.TreeHash.String()); err != nil { - return err - } - - for _, parent := range c.ParentHashes { - if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { - return err - } - } - - if _, err = fmt.Fprint(w, "author "); err != nil { - return err - } - - if err = c.Author.Encode(w); err != nil { - return err - } - - if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { - return err - } - - if err = c.Committer.Encode(w); err != nil { - return err - } - - if c.PGPSignature != "" && includeSig { - if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { - return err - } - - // Split all the signature lines and re-write with a left padding and - // newline. Use join for this so it's clear that a newline should not be - // added after this section, as it will be added when the message is - // printed. - signature := strings.TrimSuffix(c.PGPSignature, "\n") - lines := strings.Split(signature, "\n") - if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { - return err - } - } - - if _, err = fmt.Fprintf(w, "\n\n%s", c.Message); err != nil { - return err - } - - return err -} - -// Stats returns the stats of a commit. -func (c *Commit) Stats() (FileStats, error) { - return c.StatsContext(context.Background()) -} - -// StatsContext returns the stats of a commit. Error will be return if context -// expires. Provided context must be non-nil. 
-func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) { - fromTree, err := c.Tree() - if err != nil { - return nil, err - } - - toTree := &Tree{} - if c.NumParents() != 0 { - firstParent, err := c.Parents().Next() - if err != nil { - return nil, err - } - - toTree, err = firstParent.Tree() - if err != nil { - return nil, err - } - } - - patch, err := toTree.PatchContext(ctx, fromTree) - if err != nil { - return nil, err - } - - return getFileStatsFromFilePatches(patch.FilePatches()), nil -} - -func (c *Commit) String() string { - return fmt.Sprintf( - "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", - plumbing.CommitObject, c.Hash, c.Author.String(), - c.Author.When.Format(DateFormat), indent(c.Message), - ) -} - -// Verify performs PGP verification of the commit with a provided armored -// keyring and returns openpgp.Entity associated with verifying key on success. -func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { - keyRingReader := strings.NewReader(armoredKeyRing) - keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) - if err != nil { - return nil, err - } - - // Extract signature. - signature := strings.NewReader(c.PGPSignature) - - encoded := &plumbing.MemoryObject{} - // Encode commit components, excluding signature and get a reader object. - if err := c.EncodeWithoutSignature(encoded); err != nil { - return nil, err - } - er, err := encoded.Reader() - if err != nil { - return nil, err - } - - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) -} - -func indent(t string) string { - var output []string - for _, line := range strings.Split(t, "\n") { - if len(line) != 0 { - line = " " + line - } - - output = append(output, line) - } - - return strings.Join(output, "\n") -} - -// CommitIter is a generic closable interface for iterating over commits. -type CommitIter interface { - Next() (*Commit, error) - ForEach(func(*Commit) error) error - Close() -} - -// storerCommitIter provides an iterator from commits in an EncodedObjectStorer. -type storerCommitIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewCommitIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a CommitIter that iterates over all -// commits contained in the storer.EncodedObjectIter. -// -// Any non-commit object returned by the storer.EncodedObjectIter is skipped. -func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter { - return &storerCommitIter{iter, s} -} - -// Next moves the iterator to the next commit and returns a pointer to it. If -// there are no more commits, it returns io.EOF. -func (iter *storerCommitIter) Next() (*Commit, error) { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - return DecodeCommit(iter.s, obj) -} - -// ForEach call the cb function for each commit contained on this iter until -// an error appends or the end of the iter is reached. If ErrStop is sent -// the iteration is stopped but no error is returned. The iterator is closed. 
-func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - c, err := DecodeCommit(iter.s, obj) - if err != nil { - return err - } - - return cb(c) - }) -} - -func (iter *storerCommitIter) Close() { - iter.EncodedObjectIter.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go deleted file mode 100644 index a96b6a4cf0f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go +++ /dev/null @@ -1,327 +0,0 @@ -package object - -import ( - "container/list" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" -) - -type commitPreIterator struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - stack []CommitIter - start *Commit -} - -// NewCommitPreorderIter returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents in pre-order. -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). Ignore allows to skip some -// commits from being iterated. -func NewCommitPreorderIter( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &commitPreIterator{ - seenExternal: seenExternal, - seen: seen, - stack: make([]CommitIter, 0), - start: c, - } -} - -func (w *commitPreIterator) Next() (*Commit, error) { - var c *Commit - for { - if w.start != nil { - c = w.start - w.start = nil - } else { - current := len(w.stack) - 1 - if current < 0 { - return nil, io.EOF - } - - var err error - c, err = w.stack[current].Next() - if err == io.EOF { - w.stack = w.stack[:current] - continue - } - - if err != nil { - return nil, err - } - } - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - if c.NumParents() > 0 { - w.stack = append(w.stack, filteredParentIter(c, w.seen)) - } - - return c, nil - } -} - -func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter { - var hashes []plumbing.Hash - for _, h := range c.ParentHashes { - if !seen[h] { - hashes = append(hashes, h) - } - } - - return NewCommitIter(c.s, - storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes), - ) -} - -func (w *commitPreIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitPreIterator) Close() {} - -type commitPostIterator struct { - stack []*Commit - seen map[plumbing.Hash]bool -} - -// NewCommitPostorderIter returns a CommitIter that walks the commit -// history like WalkCommitHistory but in post-order. This means that after -// walking a merge commit, the merged commit will be walked before the base -// it was merged on. This can be useful if you wish to see the history in -// chronological order. Ignore allows to skip some commits from being iterated. 
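The walker constructors in the file above all return the CommitIter interface, so history walks are usually written with ForEach. A sketch of the post-order walk provided by NewCommitPostorderIter (the repository path and the 20-commit cut-off are illustrative):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	iter := object.NewCommitPostorderIter(commit, nil)
	n := 0
	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash.String()[:7], c.Committer.When.Format("2006-01-02"))
		n++
		if n == 20 {
			return storer.ErrStop // ends the walk early without reporting an error
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}
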
-func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &commitPostIterator{ - stack: []*Commit{c}, - seen: seen, - } -} - -func (w *commitPostIterator) Next() (*Commit, error) { - for { - if len(w.stack) == 0 { - return nil, io.EOF - } - - c := w.stack[len(w.stack)-1] - w.stack = w.stack[:len(w.stack)-1] - - if w.seen[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - return c, c.Parents().ForEach(func(p *Commit) error { - w.stack = append(w.stack, p) - return nil - }) - } -} - -func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitPostIterator) Close() {} - -// commitAllIterator stands for commit iterator for all refs. -type commitAllIterator struct { - // currCommit points to the current commit. - currCommit *list.Element -} - -// NewCommitAllIter returns a new commit iterator for all refs. -// repoStorer is a repo Storer used to get commits and references. -// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order -func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) { - commitsPath := list.New() - commitsLookup := make(map[plumbing.Hash]*list.Element) - head, err := storer.ResolveReference(repoStorer, plumbing.HEAD) - if err == nil { - err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup) - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - // add all references along with the HEAD - refIter, err := repoStorer.IterReferences() - if err != nil { - return nil, err - } - defer refIter.Close() - - for { - ref, err := refIter.Next() - if err == io.EOF { - break - } - - if err == plumbing.ErrReferenceNotFound { - continue - } - - if err != nil { - return nil, err - } - - if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil { - return nil, err - } - } - - return &commitAllIterator{commitsPath.Front()}, nil -} - -func addReference( - repoStorer storage.Storer, - commitIterFunc func(*Commit) CommitIter, - ref *plumbing.Reference, - commitsPath *list.List, - commitsLookup map[plumbing.Hash]*list.Element) error { - - _, exists := commitsLookup[ref.Hash()] - if exists { - // we already have it - skip the reference. - return nil - } - - refCommit, _ := GetCommit(repoStorer, ref.Hash()) - if refCommit == nil { - // if it's not a commit - skip it. 
- return nil - } - - var ( - refCommits []*Commit - parent *list.Element - ) - // collect all ref commits to add - commitIter := commitIterFunc(refCommit) - for c, e := commitIter.Next(); e == nil; { - parent, exists = commitsLookup[c.Hash] - if exists { - break - } - refCommits = append(refCommits, c) - c, e = commitIter.Next() - } - commitIter.Close() - - if parent == nil { - // common parent - not found - // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet) - for _, c := range refCommits { - parent = commitsPath.PushBack(c) - commitsLookup[c.Hash] = parent - } - } else { - // add ref's commits to the path in reverse order (from the latest) - for i := len(refCommits) - 1; i >= 0; i-- { - c := refCommits[i] - // insert before found common parent - parent = commitsPath.InsertBefore(c, parent) - commitsLookup[c.Hash] = parent - } - } - - return nil -} - -func (it *commitAllIterator) Next() (*Commit, error) { - if it.currCommit == nil { - return nil, io.EOF - } - - c := it.currCommit.Value.(*Commit) - it.currCommit = it.currCommit.Next() - - return c, nil -} - -func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := it.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (it *commitAllIterator) Close() { - it.currCommit = nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go deleted file mode 100644 index 8047fa9bc0e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go +++ /dev/null @@ -1,100 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type bfsCommitIterator struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - queue []*Commit -} - -// NewCommitIterBSF returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents in pre-order. -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). Ignore allows to skip some -// commits from being iterated. 
-func NewCommitIterBSF( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - return &bfsCommitIterator{ - seenExternal: seenExternal, - seen: seen, - queue: []*Commit{c}, - } -} - -func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error { - if w.seen[h] || w.seenExternal[h] { - return nil - } - c, err := GetCommit(store, h) - if err != nil { - return err - } - w.queue = append(w.queue, c) - return nil -} - -func (w *bfsCommitIterator) Next() (*Commit, error) { - var c *Commit - for { - if len(w.queue) == 0 { - return nil, io.EOF - } - c = w.queue[0] - w.queue = w.queue[1:] - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - for _, h := range c.ParentHashes { - err := w.appendHash(c.s, h) - if err != nil { - return nil, err - } - } - - return c, nil - } -} - -func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *bfsCommitIterator) Close() {} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go deleted file mode 100644 index 9d518133e21..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go +++ /dev/null @@ -1,175 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// NewFilterCommitIter returns a CommitIter that walks the commit history, -// starting at the passed commit and visiting its parents in Breadth-first order. -// The commits returned by the CommitIter will validate the passed CommitFilter. -// The history won't be transversed beyond a commit if isLimit is true for it. -// Each commit will be visited only once. -// If the commit history can not be traversed, or the Close() method is called, -// the CommitIter won't return more commits. -// If no isValid is passed, all ancestors of from commit will be valid. -// If no isLimit is limit, all ancestors of all commits will be visited. -func NewFilterCommitIter( - from *Commit, - isValid *CommitFilter, - isLimit *CommitFilter, -) CommitIter { - var validFilter CommitFilter - if isValid == nil { - validFilter = func(_ *Commit) bool { - return true - } - } else { - validFilter = *isValid - } - - var limitFilter CommitFilter - if isLimit == nil { - limitFilter = func(_ *Commit) bool { - return false - } - } else { - limitFilter = *isLimit - } - - return &filterCommitIter{ - isValid: validFilter, - isLimit: limitFilter, - visited: map[plumbing.Hash]struct{}{}, - queue: []*Commit{from}, - } -} - -// CommitFilter returns a boolean for the passed Commit -type CommitFilter func(*Commit) bool - -// filterCommitIter implements CommitIter -type filterCommitIter struct { - isValid CommitFilter - isLimit CommitFilter - visited map[plumbing.Hash]struct{} - queue []*Commit - lastErr error -} - -// Next returns the next commit of the CommitIter. -// It will return io.EOF if there are no more commits to visit, -// or an error if the history could not be traversed. 
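NewFilterCommitIter above takes two optional predicates: isValid decides which visited commits are yielded, and isLimit stops traversal below matching commits. A sketch with made-up filters, yielding only merge commits and pruning history at commits whose message starts with "release:":

package main

import (
	"fmt"
	"strings"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	var isValid object.CommitFilter = func(c *object.Commit) bool {
		return c.NumParents() > 1 // yield merge commits only (illustrative)
	}
	var isLimit object.CommitFilter = func(c *object.Commit) bool {
		return strings.HasPrefix(c.Message, "release:") // prune below these (illustrative)
	}
	iter := object.NewFilterCommitIter(commit, &isValid, &isLimit)
	_ = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash.String()[:7], strings.SplitN(c.Message, "\n", 2)[0])
		return nil
	})
}
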
-func (w *filterCommitIter) Next() (*Commit, error) { - var commit *Commit - var err error - for { - commit, err = w.popNewFromQueue() - if err != nil { - return nil, w.close(err) - } - - w.visited[commit.Hash] = struct{}{} - - if !w.isLimit(commit) { - err = w.addToQueue(commit.s, commit.ParentHashes...) - if err != nil { - return nil, w.close(err) - } - } - - if w.isValid(commit) { - return commit, nil - } - } -} - -// ForEach runs the passed callback over each Commit returned by the CommitIter -// until the callback returns an error or there is no more commits to traverse. -func (w *filterCommitIter) ForEach(cb func(*Commit) error) error { - for { - commit, err := w.Next() - if err == io.EOF { - break - } - - if err != nil { - return err - } - - if err := cb(commit); err == storer.ErrStop { - break - } else if err != nil { - return err - } - } - - return nil -} - -// Error returns the error that caused that the CommitIter is no longer returning commits -func (w *filterCommitIter) Error() error { - return w.lastErr -} - -// Close closes the CommitIter -func (w *filterCommitIter) Close() { - w.visited = map[plumbing.Hash]struct{}{} - w.queue = []*Commit{} - w.isLimit = nil - w.isValid = nil -} - -// close closes the CommitIter with an error -func (w *filterCommitIter) close(err error) error { - w.Close() - w.lastErr = err - return err -} - -// popNewFromQueue returns the first new commit from the internal fifo queue, -// or an io.EOF error if the queue is empty -func (w *filterCommitIter) popNewFromQueue() (*Commit, error) { - var first *Commit - for { - if len(w.queue) == 0 { - if w.lastErr != nil { - return nil, w.lastErr - } - - return nil, io.EOF - } - - first = w.queue[0] - w.queue = w.queue[1:] - if _, ok := w.visited[first.Hash]; ok { - continue - } - - return first, nil - } -} - -// addToQueue adds the passed commits to the internal fifo queue if they weren't seen -// or returns an error if the passed hashes could not be used to get valid commits -func (w *filterCommitIter) addToQueue( - store storer.EncodedObjectStorer, - hashes ...plumbing.Hash, -) error { - for _, hash := range hashes { - if _, ok := w.visited[hash]; ok { - continue - } - - commit, err := GetCommit(store, hash) - if err != nil { - return err - } - - w.queue = append(w.queue, commit) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go deleted file mode 100644 index fbddf1d238f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go +++ /dev/null @@ -1,103 +0,0 @@ -package object - -import ( - "io" - - "github.com/emirpasic/gods/trees/binaryheap" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitIteratorByCTime struct { - seenExternal map[plumbing.Hash]bool - seen map[plumbing.Hash]bool - heap *binaryheap.Heap -} - -// NewCommitIterCTime returns a CommitIter that walks the commit history, -// starting at the given commit and visiting its parents while preserving Committer Time order. -// this appears to be the closest order to `git log` -// The given callback will be called for each visited commit. Each commit will -// be visited only once. If the callback returns an error, walking will stop -// and will return the error. Other errors might be returned if the history -// cannot be traversed (e.g. missing objects). 
Ignore allows to skip some -// commits from being iterated. -func NewCommitIterCTime( - c *Commit, - seenExternal map[plumbing.Hash]bool, - ignore []plumbing.Hash, -) CommitIter { - seen := make(map[plumbing.Hash]bool) - for _, h := range ignore { - seen[h] = true - } - - heap := binaryheap.NewWith(func(a, b interface{}) int { - if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) { - return 1 - } - return -1 - }) - heap.Push(c) - - return &commitIteratorByCTime{ - seenExternal: seenExternal, - seen: seen, - heap: heap, - } -} - -func (w *commitIteratorByCTime) Next() (*Commit, error) { - var c *Commit - for { - cIn, ok := w.heap.Pop() - if !ok { - return nil, io.EOF - } - c = cIn.(*Commit) - - if w.seen[c.Hash] || w.seenExternal[c.Hash] { - continue - } - - w.seen[c.Hash] = true - - for _, h := range c.ParentHashes { - if w.seen[h] || w.seenExternal[h] { - continue - } - pc, err := GetCommit(c.s, h) - if err != nil { - return nil, err - } - w.heap.Push(pc) - } - - return c, nil - } -} - -func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error { - for { - c, err := w.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - err = cb(c) - if err == storer.ErrStop { - break - } - if err != nil { - return err - } - } - - return nil -} - -func (w *commitIteratorByCTime) Close() {} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go deleted file mode 100644 index ac56a71c41a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go +++ /dev/null @@ -1,65 +0,0 @@ -package object - -import ( - "io" - "time" - - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitLimitIter struct { - sourceIter CommitIter - limitOptions LogLimitOptions -} - -type LogLimitOptions struct { - Since *time.Time - Until *time.Time -} - -func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter { - iterator := new(commitLimitIter) - iterator.sourceIter = commitIter - iterator.limitOptions = limitOptions - return iterator -} - -func (c *commitLimitIter) Next() (*Commit, error) { - for { - commit, err := c.sourceIter.Next() - if err != nil { - return nil, err - } - - if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) { - continue - } - if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) { - continue - } - return commit, nil - } -} - -func (c *commitLimitIter) ForEach(cb func(*Commit) error) error { - for { - commit, nextErr := c.Next() - if nextErr == io.EOF { - break - } - if nextErr != nil { - return nextErr - } - err := cb(commit) - if err == storer.ErrStop { - return nil - } else if err != nil { - return err - } - } - return nil -} - -func (c *commitLimitIter) Close() { - c.sourceIter.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go deleted file mode 100644 index aa0ca15fd0b..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go +++ /dev/null @@ -1,160 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type commitPathIter struct { - pathFilter func(string) bool - sourceIter CommitIter - currentCommit *Commit - checkParent 
bool -} - -// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between -// successive trees returned from the commit iterator from the argument. The purpose of this is -// to find the commits that explain how the files that match the path came to be. -// If checkParent is true then the function double checks if potential parent (next commit in a path) -// is one of the parents in the tree (it's used by `git log --all`). -// pathFilter is a function that takes path of file as argument and returns true if we want it -func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter { - iterator := new(commitPathIter) - iterator.sourceIter = commitIter - iterator.pathFilter = pathFilter - iterator.checkParent = checkParent - return iterator -} - -// NewCommitFileIterFromIter is kept for compatibility, can be replaced with NewCommitPathIterFromIter -func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { - return NewCommitPathIterFromIter( - func(path string) bool { - return path == fileName - }, - commitIter, - checkParent, - ) -} - -func (c *commitPathIter) Next() (*Commit, error) { - if c.currentCommit == nil { - var err error - c.currentCommit, err = c.sourceIter.Next() - if err != nil { - return nil, err - } - } - commit, commitErr := c.getNextFileCommit() - - // Setting current-commit to nil to prevent unwanted states when errors are raised - if commitErr != nil { - c.currentCommit = nil - } - return commit, commitErr -} - -func (c *commitPathIter) getNextFileCommit() (*Commit, error) { - for { - // Parent-commit can be nil if the current-commit is the initial commit - parentCommit, parentCommitErr := c.sourceIter.Next() - if parentCommitErr != nil { - // If the parent-commit is beyond the initial commit, keep it nil - if parentCommitErr != io.EOF { - return nil, parentCommitErr - } - parentCommit = nil - } - - // Fetch the trees of the current and parent commits - currentTree, currTreeErr := c.currentCommit.Tree() - if currTreeErr != nil { - return nil, currTreeErr - } - - var parentTree *Tree - if parentCommit != nil { - var parentTreeErr error - parentTree, parentTreeErr = parentCommit.Tree() - if parentTreeErr != nil { - return nil, parentTreeErr - } - } - - // Find diff between current and parent trees - changes, diffErr := DiffTree(currentTree, parentTree) - if diffErr != nil { - return nil, diffErr - } - - found := c.hasFileChange(changes, parentCommit) - - // Storing the current-commit in-case a change is found, and - // Updating the current-commit for the next-iteration - prevCommit := c.currentCommit - c.currentCommit = parentCommit - - if found { - return prevCommit, nil - } - - // If not matches found and if parent-commit is beyond the initial commit, then return with EOF - if parentCommit == nil { - return nil, io.EOF - } - } -} - -func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { - for _, change := range changes { - if !c.pathFilter(change.name()) { - continue - } - - // filename matches, now check if source iterator contains all commits (from all refs) - if c.checkParent { - if parent != nil && isParentHash(parent.Hash, c.currentCommit) { - return true - } - continue - } - - return true - } - - return false -} - -func isParentHash(hash plumbing.Hash, commit *Commit) bool { - for _, h := range commit.ParentHashes { - if h == hash { - return true - } - } - return false -} - -func (c *commitPathIter) ForEach(cb func(*Commit) 
error) error { - for { - commit, nextErr := c.Next() - if nextErr == io.EOF { - break - } - if nextErr != nil { - return nextErr - } - err := cb(commit) - if err == storer.ErrStop { - return nil - } else if err != nil { - return err - } - } - return nil -} - -func (c *commitPathIter) Close() { - c.sourceIter.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go deleted file mode 100644 index 3591f5f0a60..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go +++ /dev/null @@ -1,12 +0,0 @@ -package object - -import ( - "bufio" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewReader(nil) - }, -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go deleted file mode 100644 index 7c2222702ce..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go +++ /dev/null @@ -1,98 +0,0 @@ -package object - -import ( - "bytes" - "context" - - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// DiffTree compares the content and mode of the blobs found via two -// tree objects. -// DiffTree does not perform rename detection, use DiffTreeWithOptions -// instead to detect renames. -func DiffTree(a, b *Tree) (Changes, error) { - return DiffTreeContext(context.Background(), a, b) -} - -// DiffTreeContext compares the content and mode of the blobs found via two -// tree objects. Provided context must be non-nil. -// An error will be returned if context expires. -func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) { - return DiffTreeWithOptions(ctx, a, b, nil) -} - -// DiffTreeOptions are the configurable options when performing a diff tree. -type DiffTreeOptions struct { - // DetectRenames is whether the diff tree will use rename detection. - DetectRenames bool - // RenameScore is the threshold to of similarity between files to consider - // that a pair of delete and insert are a rename. The number must be - // exactly between 0 and 100. - RenameScore uint - // RenameLimit is the maximum amount of files that can be compared when - // detecting renames. The number of comparisons that have to be performed - // is equal to the number of deleted files * the number of added files. - // That means, that if 100 files were deleted and 50 files were added, 5000 - // file comparisons may be needed. So, if the rename limit is 50, the number - // of both deleted and added needs to be equal or less than 50. - // A value of 0 means no limit. - RenameLimit uint - // OnlyExactRenames performs only detection of exact renames and will not perform - // any detection of renames based on file similarity. - OnlyExactRenames bool -} - -// DefaultDiffTreeOptions are the default and recommended options for the -// diff tree. -var DefaultDiffTreeOptions = &DiffTreeOptions{ - DetectRenames: true, - RenameScore: 60, - RenameLimit: 0, - OnlyExactRenames: false, -} - -// DiffTreeWithOptions compares the content and mode of the blobs found -// via two tree objects with the given options. The provided context -// must be non-nil. -// If no options are passed, no rename detection will be performed. The -// recommended options are DefaultDiffTreeOptions. -// An error will be returned if the context expires. 
-// This function will be deprecated and removed in v6 so the default -// behaviour of DiffTree is to detect renames. -func DiffTreeWithOptions( - ctx context.Context, - a, b *Tree, - opts *DiffTreeOptions, -) (Changes, error) { - from := NewTreeRootNode(a) - to := NewTreeRootNode(b) - - hashEqual := func(a, b noder.Hasher) bool { - return bytes.Equal(a.Hash(), b.Hash()) - } - - merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual) - if err != nil { - if err == merkletrie.ErrCanceled { - return nil, ErrCanceled - } - return nil, err - } - - changes, err := newChanges(merkletrieChanges) - if err != nil { - return nil, err - } - - if opts == nil { - opts = new(DiffTreeOptions) - } - - if opts.DetectRenames { - return DetectRenames(changes, opts) - } - - return changes, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go deleted file mode 100644 index 6cc5367d8d6..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go +++ /dev/null @@ -1,137 +0,0 @@ -package object - -import ( - "bytes" - "io" - "strings" - - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/binary" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// File represents git file objects. -type File struct { - // Name is the path of the file. It might be relative to a tree, - // depending of the function that generates it. - Name string - // Mode is the file mode. - Mode filemode.FileMode - // Blob with the contents of the file. - Blob -} - -// NewFile returns a File based on the given blob object -func NewFile(name string, m filemode.FileMode, b *Blob) *File { - return &File{Name: name, Mode: m, Blob: *b} -} - -// Contents returns the contents of a file as a string. -func (f *File) Contents() (content string, err error) { - reader, err := f.Reader() - if err != nil { - return "", err - } - defer ioutil.CheckClose(reader, &err) - - buf := new(bytes.Buffer) - if _, err := buf.ReadFrom(reader); err != nil { - return "", err - } - - return buf.String(), nil -} - -// IsBinary returns if the file is binary or not -func (f *File) IsBinary() (bin bool, err error) { - reader, err := f.Reader() - if err != nil { - return false, err - } - defer ioutil.CheckClose(reader, &err) - - return binary.IsBinary(reader) -} - -// Lines returns a slice of lines from the contents of a file, stripping -// all end of line characters. If the last line is empty (does not end -// in an end of line), it is also stripped. -func (f *File) Lines() ([]string, error) { - content, err := f.Contents() - if err != nil { - return nil, err - } - - splits := strings.Split(content, "\n") - // remove the last line if it is empty - if splits[len(splits)-1] == "" { - return splits[:len(splits)-1], nil - } - - return splits, nil -} - -// FileIter provides an iterator for the files in a tree. -type FileIter struct { - s storer.EncodedObjectStorer - w TreeWalker -} - -// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a -// *FileIter that iterates over all files contained in the tree, recursively. -func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { - return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)} -} - -// Next moves the iterator to the next file and returns a pointer to it. If -// there are no more files, it returns io.EOF. 
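The FileIter above is usually obtained through Commit.Tree(). A sketch listing the text files of HEAD together with their line counts (placeholder repository):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	head, err := repo.Head()
	if err != nil {
		panic(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		panic(err)
	}
	tree, err := commit.Tree()
	if err != nil {
		panic(err)
	}
	err = tree.Files().ForEach(func(f *object.File) error {
		bin, err := f.IsBinary()
		if err != nil || bin {
			return err // skip binaries; propagate real errors
		}
		lines, err := f.Lines()
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d lines)\n", f.Name, len(lines))
		return nil
	})
	if err != nil {
		panic(err)
	}
}
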
-func (iter *FileIter) Next() (*File, error) { - for { - name, entry, err := iter.w.Next() - if err != nil { - return nil, err - } - - if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule { - continue - } - - blob, err := GetBlob(iter.s, entry.Hash) - if err != nil { - return nil, err - } - - return NewFile(name, entry.Mode, blob), nil - } -} - -// ForEach call the cb function for each file contained in this iter until -// an error happens or the end of the iter is reached. If plumbing.ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *FileIter) ForEach(cb func(*File) error) error { - defer iter.Close() - - for { - f, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err := cb(f); err != nil { - if err == storer.ErrStop { - return nil - } - - return err - } - } -} - -func (iter *FileIter) Close() { - iter.w.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go deleted file mode 100644 index b412361d029..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go +++ /dev/null @@ -1,210 +0,0 @@ -package object - -import ( - "fmt" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// errIsReachable is thrown when first commit is an ancestor of the second -var errIsReachable = fmt.Errorf("first is reachable from second") - -// MergeBase mimics the behavior of `git merge-base actual other`, returning the -// best common ancestor between the actual and the passed one. -// The best common ancestors can not be reached from other common ancestors. -func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) { - // use sortedByCommitDateDesc strategy - sorted := sortByCommitDateDesc(c, other) - newer := sorted[0] - older := sorted[1] - - newerHistory, err := ancestorsIndex(older, newer) - if err == errIsReachable { - return []*Commit{older}, nil - } - - if err != nil { - return nil, err - } - - var res []*Commit - inNewerHistory := isInIndexCommitFilter(newerHistory) - resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory) - _ = resIter.ForEach(func(commit *Commit) error { - res = append(res, commit) - return nil - }) - - return Independents(res) -} - -// IsAncestor returns true if the actual commit is ancestor of the passed one. -// It returns an error if the history is not transversable -// It mimics the behavior of `git merge --is-ancestor actual other` -func (c *Commit) IsAncestor(other *Commit) (bool, error) { - found := false - iter := NewCommitPreorderIter(other, nil, nil) - err := iter.ForEach(func(comm *Commit) error { - if comm.Hash != c.Hash { - return nil - } - - found = true - return storer.ErrStop - }) - - return found, err -} - -// ancestorsIndex returns a map with the ancestors of the starting commit if the -// excluded one is not one of them. It returns errIsReachable if the excluded commit -// is ancestor of the starting, or another error if the history is not traversable. 
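MergeBase above mimics `git merge-base A B`. A sketch resolving two revisions first and printing their best common ancestors (the branch names are placeholders):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	aHash, err := repo.ResolveRevision(plumbing.Revision("main"))
	if err != nil {
		panic(err)
	}
	bHash, err := repo.ResolveRevision(plumbing.Revision("feature")) // placeholder branch
	if err != nil {
		panic(err)
	}
	a, err := repo.CommitObject(*aHash)
	if err != nil {
		panic(err)
	}
	b, err := repo.CommitObject(*bHash)
	if err != nil {
		panic(err)
	}
	bases, err := a.MergeBase(b) // may return more than one candidate
	if err != nil {
		panic(err)
	}
	for _, c := range bases {
		fmt.Println(c.Hash)
	}
}
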
-func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) { - if excluded.Hash.String() == starting.Hash.String() { - return nil, errIsReachable - } - - startingHistory := map[plumbing.Hash]struct{}{} - startingIter := NewCommitIterBSF(starting, nil, nil) - err := startingIter.ForEach(func(commit *Commit) error { - if commit.Hash == excluded.Hash { - return errIsReachable - } - - startingHistory[commit.Hash] = struct{}{} - return nil - }) - - if err != nil { - return nil, err - } - - return startingHistory, nil -} - -// Independents returns a subset of the passed commits, that are not reachable the others -// It mimics the behavior of `git merge-base --independent commit...`. -func Independents(commits []*Commit) ([]*Commit, error) { - // use sortedByCommitDateDesc strategy - candidates := sortByCommitDateDesc(commits...) - candidates = removeDuplicated(candidates) - - seen := map[plumbing.Hash]struct{}{} - var isLimit CommitFilter = func(commit *Commit) bool { - _, ok := seen[commit.Hash] - return ok - } - - if len(candidates) < 2 { - return candidates, nil - } - - pos := 0 - for { - from := candidates[pos] - others := remove(candidates, from) - fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit) - err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error { - for _, other := range others { - if fromAncestor.Hash == other.Hash { - candidates = remove(candidates, other) - others = remove(others, other) - } - } - - if len(candidates) == 1 { - return storer.ErrStop - } - - seen[fromAncestor.Hash] = struct{}{} - return nil - }) - - if err != nil { - return nil, err - } - - nextPos := indexOf(candidates, from) + 1 - if nextPos >= len(candidates) { - break - } - - pos = nextPos - } - - return candidates, nil -} - -// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc` -// -// Following this strategy, it is tried to reduce the time needed when walking -// the history from one commit to reach the others. It is assumed that ancestors -// use to be committed before its descendant; -// That way `Independents(A^, A)` will be processed as being `Independents(A, A^)`; -// so starting by `A` it will be reached `A^` way sooner than walking from `A^` -// to the initial commit, and then from `A` to `A^`. 
-func sortByCommitDateDesc(commits ...*Commit) []*Commit { - sorted := make([]*Commit, len(commits)) - copy(sorted, commits) - sort.Slice(sorted, func(i, j int) bool { - return sorted[i].Committer.When.After(sorted[j].Committer.When) - }) - - return sorted -} - -// indexOf returns the first position where target was found in the passed commits -func indexOf(commits []*Commit, target *Commit) int { - for i, commit := range commits { - if target.Hash == commit.Hash { - return i - } - } - - return -1 -} - -// remove returns the passed commits excluding the commit toDelete -func remove(commits []*Commit, toDelete *Commit) []*Commit { - res := make([]*Commit, len(commits)) - j := 0 - for _, commit := range commits { - if commit.Hash == toDelete.Hash { - continue - } - - res[j] = commit - j++ - } - - return res[:j] -} - -// removeDuplicated removes duplicated commits from the passed slice of commits -func removeDuplicated(commits []*Commit) []*Commit { - seen := make(map[plumbing.Hash]struct{}, len(commits)) - res := make([]*Commit, len(commits)) - j := 0 - for _, commit := range commits { - if _, ok := seen[commit.Hash]; ok { - continue - } - - seen[commit.Hash] = struct{}{} - res[j] = commit - j++ - } - - return res[:j] -} - -// isInIndexCommitFilter returns a commitFilter that returns true -// if the commit is in the passed index. -func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter { - return func(c *Commit) bool { - _, ok := index[c.Hash] - return ok - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go deleted file mode 100644 index 13b1e91c9c6..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go +++ /dev/null @@ -1,239 +0,0 @@ -// Package object contains implementations of all Git objects and utility -// functions to work with them. -package object - -import ( - "bytes" - "errors" - "fmt" - "io" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// ErrUnsupportedObject trigger when a non-supported object is being decoded. -var ErrUnsupportedObject = errors.New("unsupported object type") - -// Object is a generic representation of any git object. It is implemented by -// Commit, Tree, Blob, and Tag, and includes the functions that are common to -// them. -// -// Object is returned when an object can be of any type. It is frequently used -// with a type cast to acquire the specific type of object: -// -// func process(obj Object) { -// switch o := obj.(type) { -// case *Commit: -// // o is a Commit -// case *Tree: -// // o is a Tree -// case *Blob: -// // o is a Blob -// case *Tag: -// // o is a Tag -// } -// } -// -// This interface is intentionally different from plumbing.EncodedObject, which -// is a lower level interface used by storage implementations to read and write -// objects in its encoded form. -type Object interface { - ID() plumbing.Hash - Type() plumbing.ObjectType - Decode(plumbing.EncodedObject) error - Encode(plumbing.EncodedObject) error -} - -// GetObject gets an object from an object storer and decodes it. -func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { - o, err := s.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return nil, err - } - - return DecodeObject(s, o) -} - -// DecodeObject decodes an encoded object into an Object and associates it to -// the given object storer. 
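GetObject and DecodeObject return the generic Object interface, which callers usually narrow with a type switch much like the package comment above illustrates. A sketch (the hash literal is a placeholder; repo.Storer is the repository's object storage):

package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	h := plumbing.NewHash("0123456789abcdef0123456789abcdef01234567") // placeholder hash
	obj, err := object.GetObject(repo.Storer, h)
	if err != nil {
		panic(err)
	}
	switch o := obj.(type) {
	case *object.Commit:
		fmt.Println("commit by", o.Author.Name)
	case *object.Tree:
		fmt.Println("tree with", len(o.Entries), "entries")
	case *object.Blob:
		fmt.Println("blob of", o.Size, "bytes")
	case *object.Tag:
		fmt.Println("tag", o.Name)
	}
}
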
-func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { - switch o.Type() { - case plumbing.CommitObject: - return DecodeCommit(s, o) - case plumbing.TreeObject: - return DecodeTree(s, o) - case plumbing.BlobObject: - return DecodeBlob(o) - case plumbing.TagObject: - return DecodeTag(s, o) - default: - return nil, plumbing.ErrInvalidType - } -} - -// DateFormat is the format being used in the original git implementation -const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" - -// Signature is used to identify who and when created a commit or tag. -type Signature struct { - // Name represents a person name. It is an arbitrary string. - Name string - // Email is an email, but it cannot be assumed to be well-formed. - Email string - // When is the timestamp of the signature. - When time.Time -} - -// Decode decodes a byte slice into a signature -func (s *Signature) Decode(b []byte) { - open := bytes.LastIndexByte(b, '<') - close := bytes.LastIndexByte(b, '>') - if open == -1 || close == -1 { - return - } - - if close < open { - return - } - - s.Name = string(bytes.Trim(b[:open], " ")) - s.Email = string(b[open+1 : close]) - - hasTime := close+2 < len(b) - if hasTime { - s.decodeTimeAndTimeZone(b[close+2:]) - } -} - -// Encode encodes a Signature into a writer. -func (s *Signature) Encode(w io.Writer) error { - if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { - return err - } - if err := s.encodeTimeAndTimeZone(w); err != nil { - return err - } - return nil -} - -var timeZoneLength = 5 - -func (s *Signature) decodeTimeAndTimeZone(b []byte) { - space := bytes.IndexByte(b, ' ') - if space == -1 { - space = len(b) - } - - ts, err := strconv.ParseInt(string(b[:space]), 10, 64) - if err != nil { - return - } - - s.When = time.Unix(ts, 0).In(time.UTC) - var tzStart = space + 1 - if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { - return - } - - timezone := string(b[tzStart : tzStart+timeZoneLength]) - tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64) - tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64) - if err1 != nil || err2 != nil { - return - } - if tzhours < 0 { - tzmins *= -1 - } - - tz := time.FixedZone("", int(tzhours*60*60+tzmins*60)) - - s.When = s.When.In(tz) -} - -func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { - u := s.When.Unix() - if u < 0 { - u = 0 - } - _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700")) - return err -} - -func (s *Signature) String() string { - return fmt.Sprintf("%s <%s>", s.Name, s.Email) -} - -// ObjectIter provides an iterator for a set of objects. -type ObjectIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewObjectIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all -// objects contained in the storer.EncodedObjectIter. -func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { - return &ObjectIter{iter, s} -} - -// Next moves the iterator to the next object and returns a pointer to it. If -// there are no more objects, it returns io.EOF. 
-func (iter *ObjectIter) Next() (Object, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - o, err := iter.toObject(obj) - if err == plumbing.ErrInvalidType { - continue - } - - if err != nil { - return nil, err - } - - return o, nil - } -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *ObjectIter) ForEach(cb func(Object) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - o, err := iter.toObject(obj) - if err == plumbing.ErrInvalidType { - return nil - } - - if err != nil { - return err - } - - return cb(o) - }) -} - -func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { - switch obj.Type() { - case plumbing.BlobObject: - blob := &Blob{} - return blob, blob.Decode(obj) - case plumbing.TreeObject: - tree := &Tree{s: iter.s} - return tree, tree.Decode(obj) - case plumbing.CommitObject: - commit := &Commit{} - return commit, commit.Decode(obj) - case plumbing.TagObject: - tag := &Tag{} - return tag, tag.Decode(obj) - default: - return nil, plumbing.ErrInvalidType - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go deleted file mode 100644 index 56b62c191f9..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ /dev/null @@ -1,354 +0,0 @@ -package object - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "math" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - fdiff "github.com/go-git/go-git/v5/plumbing/format/diff" - "github.com/go-git/go-git/v5/utils/diff" - - dmp "github.com/sergi/go-diff/diffmatchpatch" -) - -var ( - ErrCanceled = errors.New("operation canceled") -) - -func getPatch(message string, changes ...*Change) (*Patch, error) { - ctx := context.Background() - return getPatchContext(ctx, message, changes...) 
-} - -func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) { - var filePatches []fdiff.FilePatch - for _, c := range changes { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - fp, err := filePatchWithContext(ctx, c) - if err != nil { - return nil, err - } - - filePatches = append(filePatches, fp) - } - - return &Patch{message, filePatches}, nil -} - -func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) { - from, to, err := c.Files() - if err != nil { - return nil, err - } - fromContent, fIsBinary, err := fileContent(from) - if err != nil { - return nil, err - } - - toContent, tIsBinary, err := fileContent(to) - if err != nil { - return nil, err - } - - if fIsBinary || tIsBinary { - return &textFilePatch{from: c.From, to: c.To}, nil - } - - diffs := diff.Do(fromContent, toContent) - - var chunks []fdiff.Chunk - for _, d := range diffs { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - var op fdiff.Operation - switch d.Type { - case dmp.DiffEqual: - op = fdiff.Equal - case dmp.DiffDelete: - op = fdiff.Delete - case dmp.DiffInsert: - op = fdiff.Add - } - - chunks = append(chunks, &textChunk{d.Text, op}) - } - - return &textFilePatch{ - chunks: chunks, - from: c.From, - to: c.To, - }, nil - -} - -func filePatch(c *Change) (fdiff.FilePatch, error) { - return filePatchWithContext(context.Background(), c) -} - -func fileContent(f *File) (content string, isBinary bool, err error) { - if f == nil { - return - } - - isBinary, err = f.IsBinary() - if err != nil || isBinary { - return - } - - content, err = f.Contents() - - return -} - -// Patch is an implementation of fdiff.Patch interface -type Patch struct { - message string - filePatches []fdiff.FilePatch -} - -func (p *Patch) FilePatches() []fdiff.FilePatch { - return p.filePatches -} - -func (p *Patch) Message() string { - return p.message -} - -func (p *Patch) Encode(w io.Writer) error { - ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines) - - return ue.Encode(p) -} - -func (p *Patch) Stats() FileStats { - return getFileStatsFromFilePatches(p.FilePatches()) -} - -func (p *Patch) String() string { - buf := bytes.NewBuffer(nil) - err := p.Encode(buf) - if err != nil { - return fmt.Sprintf("malformed patch: %s", err.Error()) - } - - return buf.String() -} - -// changeEntryWrapper is an implementation of fdiff.File interface -type changeEntryWrapper struct { - ce ChangeEntry -} - -func (f *changeEntryWrapper) Hash() plumbing.Hash { - if !f.ce.TreeEntry.Mode.IsFile() { - return plumbing.ZeroHash - } - - return f.ce.TreeEntry.Hash -} - -func (f *changeEntryWrapper) Mode() filemode.FileMode { - return f.ce.TreeEntry.Mode -} -func (f *changeEntryWrapper) Path() string { - if !f.ce.TreeEntry.Mode.IsFile() { - return "" - } - - return f.ce.Name -} - -func (f *changeEntryWrapper) Empty() bool { - return !f.ce.TreeEntry.Mode.IsFile() -} - -// textFilePatch is an implementation of fdiff.FilePatch interface -type textFilePatch struct { - chunks []fdiff.Chunk - from, to ChangeEntry -} - -func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) { - f := &changeEntryWrapper{tf.from} - t := &changeEntryWrapper{tf.to} - - if !f.Empty() { - from = f - } - - if !t.Empty() { - to = t - } - - return -} - -func (tf *textFilePatch) IsBinary() bool { - return len(tf.chunks) == 0 -} - -func (tf *textFilePatch) Chunks() []fdiff.Chunk { - return tf.chunks -} - -// textChunk is an implementation of fdiff.Chunk interface -type 
textChunk struct { - content string - op fdiff.Operation -} - -func (t *textChunk) Content() string { - return t.content -} - -func (t *textChunk) Type() fdiff.Operation { - return t.op -} - -// FileStat stores the status of changes in content of a file. -type FileStat struct { - Name string - Addition int - Deletion int -} - -func (fs FileStat) String() string { - return printStat([]FileStat{fs}) -} - -// FileStats is a collection of FileStat. -type FileStats []FileStat - -func (fileStats FileStats) String() string { - return printStat(fileStats) -} - -func printStat(fileStats []FileStat) string { - padLength := float64(len(" ")) - newlineLength := float64(len("\n")) - separatorLength := float64(len("|")) - // Soft line length limit. The text length calculation below excludes - // length of the change number. Adding that would take it closer to 80, - // but probably not more than 80, until it's a huge number. - lineLength := 72.0 - - // Get the longest filename and longest total change. - var longestLength float64 - var longestTotalChange float64 - for _, fs := range fileStats { - if int(longestLength) < len(fs.Name) { - longestLength = float64(len(fs.Name)) - } - totalChange := fs.Addition + fs.Deletion - if int(longestTotalChange) < totalChange { - longestTotalChange = float64(totalChange) - } - } - - // Parts of the output: - // |<+++/---> - // example: " main.go | 10 +++++++--- " - - // - leftTextLength := padLength + longestLength + padLength - - // <+++++/-----> - // Excluding number length here. - rightTextLength := padLength + padLength + newlineLength - - totalTextArea := leftTextLength + separatorLength + rightTextLength - heightOfHistogram := lineLength - totalTextArea - - // Scale the histogram. - var scaleFactor float64 - if longestTotalChange > heightOfHistogram { - // Scale down to heightOfHistogram. - scaleFactor = longestTotalChange / heightOfHistogram - } else { - scaleFactor = 1.0 - } - - finalOutput := "" - for _, fs := range fileStats { - addn := float64(fs.Addition) - deln := float64(fs.Deletion) - addc := int(math.Floor(addn/scaleFactor)) - delc := int(math.Floor(deln/scaleFactor)) - if addc < 0 { - addc = 0 - } - if delc < 0 { - delc = 0 - } - adds := strings.Repeat("+", addc) - dels := strings.Repeat("-", delc) - finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) - } - - return finalOutput -} - -func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { - var fileStats FileStats - - for _, fp := range filePatches { - // ignore empty patches (binary files, submodule refs updates) - if len(fp.Chunks()) == 0 { - continue - } - - cs := FileStat{} - from, to := fp.Files() - if from == nil { - // New File is created. - cs.Name = to.Path() - } else if to == nil { - // File is deleted. - cs.Name = from.Path() - } else if from.Path() != to.Path() { - // File is renamed. Not supported. 
- // cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path()) - } else { - cs.Name = from.Path() - } - - for _, chunk := range fp.Chunks() { - s := chunk.Content() - if len(s) == 0 { - continue - } - - switch chunk.Type() { - case fdiff.Add: - cs.Addition += strings.Count(s, "\n") - if s[len(s)-1] != '\n' { - cs.Addition++ - } - case fdiff.Delete: - cs.Deletion += strings.Count(s, "\n") - if s[len(s)-1] != '\n' { - cs.Deletion++ - } - } - } - - fileStats = append(fileStats, cs) - } - - return fileStats -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go deleted file mode 100644 index 7fed72c2f52..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go +++ /dev/null @@ -1,813 +0,0 @@ -package object - -import ( - "errors" - "io" - "sort" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/go-git/go-git/v5/utils/merkletrie" -) - -// DetectRenames detects the renames in the given changes on two trees with -// the given options. It will return the given changes grouping additions and -// deletions into modifications when possible. -// If options is nil, the default diff tree options will be used. -func DetectRenames( - changes Changes, - opts *DiffTreeOptions, -) (Changes, error) { - if opts == nil { - opts = DefaultDiffTreeOptions - } - - detector := &renameDetector{ - renameScore: int(opts.RenameScore), - renameLimit: int(opts.RenameLimit), - onlyExact: opts.OnlyExactRenames, - } - - for _, c := range changes { - action, err := c.Action() - if err != nil { - return nil, err - } - - switch action { - case merkletrie.Insert: - detector.added = append(detector.added, c) - case merkletrie.Delete: - detector.deleted = append(detector.deleted, c) - default: - detector.modified = append(detector.modified, c) - } - } - - return detector.detect() -} - -// renameDetector will detect and resolve renames in a set of changes. -// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java -type renameDetector struct { - added []*Change - deleted []*Change - modified []*Change - - renameScore int - renameLimit int - onlyExact bool -} - -// detectExactRenames detects matches files that were deleted with files that -// were added where the hash is the same on both. If there are multiple targets -// the one with the most similar path will be chosen as the rename and the -// rest as either deletions or additions. 
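The exact-rename pass described above is normally reached through the exported DetectRenames helper defined earlier in this same file. A minimal caller-side sketch, assuming `from` and `to` are already-resolved *object.Tree values; the helper name renamesBetween is illustrative, not part of the library:

```go
package example

import "github.com/go-git/go-git/v5/plumbing/object"

// renamesBetween diffs two resolved trees, then lets the rename
// detector fold matching add/delete pairs into modification entries.
func renamesBetween(from, to *object.Tree) (object.Changes, error) {
	changes, err := object.DiffTree(from, to)
	if err != nil {
		return nil, err
	}
	// Passing nil options falls back to DefaultDiffTreeOptions.
	return object.DetectRenames(changes, nil)
}
```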
-func (d *renameDetector) detectExactRenames() { - added := groupChangesByHash(d.added) - deletes := groupChangesByHash(d.deleted) - var uniqueAdds []*Change - var nonUniqueAdds [][]*Change - var addedLeft []*Change - - for _, cs := range added { - if len(cs) == 1 { - uniqueAdds = append(uniqueAdds, cs[0]) - } else { - nonUniqueAdds = append(nonUniqueAdds, cs) - } - } - - for _, c := range uniqueAdds { - hash := changeHash(c) - deleted := deletes[hash] - - if len(deleted) == 1 { - if sameMode(c, deleted[0]) { - d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To}) - delete(deletes, hash) - } else { - addedLeft = append(addedLeft, c) - } - } else if len(deleted) > 1 { - bestMatch := bestNameMatch(c, deleted) - if bestMatch != nil && sameMode(c, bestMatch) { - d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To}) - delete(deletes, hash) - - var newDeletes = make([]*Change, 0, len(deleted)-1) - for _, d := range deleted { - if d != bestMatch { - newDeletes = append(newDeletes, d) - } - } - deletes[hash] = newDeletes - } - } else { - addedLeft = append(addedLeft, c) - } - } - - for _, added := range nonUniqueAdds { - hash := changeHash(added[0]) - deleted := deletes[hash] - - if len(deleted) == 1 { - deleted := deleted[0] - bestMatch := bestNameMatch(deleted, added) - if bestMatch != nil && sameMode(deleted, bestMatch) { - d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To}) - delete(deletes, hash) - - for _, c := range added { - if c != bestMatch { - addedLeft = append(addedLeft, c) - } - } - } else { - addedLeft = append(addedLeft, added...) - } - } else if len(deleted) > 1 { - maxSize := len(deleted) * len(added) - if d.renameLimit > 0 && d.renameLimit < maxSize { - maxSize = d.renameLimit - } - - matrix := make(similarityMatrix, 0, maxSize) - - for delIdx, del := range deleted { - deletedName := changeName(del) - - for addIdx, add := range added { - addedName := changeName(add) - - score := nameSimilarityScore(addedName, deletedName) - matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score}) - - if len(matrix) >= maxSize { - break - } - } - - if len(matrix) >= maxSize { - break - } - } - - sort.Stable(matrix) - - usedAdds := make(map[*Change]struct{}) - usedDeletes := make(map[*Change]struct{}) - for i := len(matrix) - 1; i >= 0; i-- { - del := deleted[matrix[i].deleted] - add := added[matrix[i].added] - - if add == nil || del == nil { - // it was already matched - continue - } - - usedAdds[add] = struct{}{} - usedDeletes[del] = struct{}{} - d.modified = append(d.modified, &Change{From: del.From, To: add.To}) - added[matrix[i].added] = nil - deleted[matrix[i].deleted] = nil - } - - for _, c := range added { - if _, ok := usedAdds[c]; !ok && c != nil { - addedLeft = append(addedLeft, c) - } - } - - var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes)) - for _, c := range deleted { - if _, ok := usedDeletes[c]; !ok && c != nil { - newDeletes = append(newDeletes, c) - } - } - deletes[hash] = newDeletes - } else { - addedLeft = append(addedLeft, added...) - } - } - - d.added = addedLeft - d.deleted = nil - for _, dels := range deletes { - d.deleted = append(d.deleted, dels...) - } -} - -// detectContentRenames detects renames based on the similarity of the content -// in the files by building a matrix of pairs between sources and destinations -// and matching by the highest score. 
-// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java -func (d *renameDetector) detectContentRenames() error { - cnt := max(len(d.added), len(d.deleted)) - if d.renameLimit > 0 && cnt > d.renameLimit { - return nil - } - - srcs, dsts := d.deleted, d.added - matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore) - if err != nil { - return err - } - renames := make([]*Change, 0, min(len(matrix), len(dsts))) - - // Match rename pairs on a first come, first serve basis until - // we have looked at everything that is above the minimum score. - for i := len(matrix) - 1; i >= 0; i-- { - pair := matrix[i] - src := srcs[pair.deleted] - dst := dsts[pair.added] - - if dst == nil || src == nil { - // It was already matched before - continue - } - - renames = append(renames, &Change{From: src.From, To: dst.To}) - - // Claim destination and source as matched - dsts[pair.added] = nil - srcs[pair.deleted] = nil - } - - d.modified = append(d.modified, renames...) - d.added = compactChanges(dsts) - d.deleted = compactChanges(srcs) - - return nil -} - -func (d *renameDetector) detect() (Changes, error) { - if len(d.added) > 0 && len(d.deleted) > 0 { - d.detectExactRenames() - - if !d.onlyExact { - if err := d.detectContentRenames(); err != nil { - return nil, err - } - } - } - - result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified)) - result = append(result, d.added...) - result = append(result, d.deleted...) - result = append(result, d.modified...) - - sort.Stable(result) - - return result, nil -} - -func bestNameMatch(change *Change, changes []*Change) *Change { - var best *Change - var bestScore int - - cname := changeName(change) - - for _, c := range changes { - score := nameSimilarityScore(cname, changeName(c)) - if score > bestScore { - bestScore = score - best = c - } - } - - return best -} - -func nameSimilarityScore(a, b string) int { - aDirLen := strings.LastIndexByte(a, '/') + 1 - bDirLen := strings.LastIndexByte(b, '/') + 1 - - dirMin := min(aDirLen, bDirLen) - dirMax := max(aDirLen, bDirLen) - - var dirScoreLtr, dirScoreRtl int - if dirMax == 0 { - dirScoreLtr = 100 - dirScoreRtl = 100 - } else { - var dirSim int - - for ; dirSim < dirMin; dirSim++ { - if a[dirSim] != b[dirSim] { - break - } - } - - dirScoreLtr = dirSim * 100 / dirMax - - if dirScoreLtr == 100 { - dirScoreRtl = 100 - } else { - for dirSim = 0; dirSim < dirMin; dirSim++ { - if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] { - break - } - } - dirScoreRtl = dirSim * 100 / dirMax - } - } - - fileMin := min(len(a)-aDirLen, len(b)-bDirLen) - fileMax := max(len(a)-aDirLen, len(b)-bDirLen) - - fileSim := 0 - for ; fileSim < fileMin; fileSim++ { - if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] { - break - } - } - fileScore := fileSim * 100 / fileMax - - return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100 -} - -func changeName(c *Change) string { - if c.To != empty { - return c.To.Name - } - return c.From.Name -} - -func changeHash(c *Change) plumbing.Hash { - if c.To != empty { - return c.To.TreeEntry.Hash - } - - return c.From.TreeEntry.Hash -} - -func changeMode(c *Change) filemode.FileMode { - if c.To != empty { - return c.To.TreeEntry.Mode - } - - return c.From.TreeEntry.Mode -} - -func sameMode(a, b *Change) bool { - return changeMode(a) == changeMode(b) -} - -func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change { - var result = make(map[plumbing.Hash][]*Change) - for _, c := range changes { - hash := 
changeHash(c) - result[hash] = append(result[hash], c) - } - return result -} - -type similarityMatrix []similarityPair - -func (m similarityMatrix) Len() int { return len(m) } -func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } -func (m similarityMatrix) Less(i, j int) bool { - if m[i].score == m[j].score { - if m[i].added == m[j].added { - return m[i].deleted < m[j].deleted - } - return m[i].added < m[j].added - } - return m[i].score < m[j].score -} - -type similarityPair struct { - // index of the added file - added int - // index of the deleted file - deleted int - // similarity score - score int -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) { - // Allocate for the worst-case scenario where every pair has a score - // that we need to consider. We might not need that many. - matrix := make(similarityMatrix, 0, len(srcs)*len(dsts)) - srcSizes := make([]int64, len(srcs)) - dstSizes := make([]int64, len(dsts)) - dstTooLarge := make(map[int]bool) - - // Consider each pair of files, if the score is above the minimum - // threshold we need to record that scoring in the matrix so we can - // later find the best matches. -outerLoop: - for srcIdx, src := range srcs { - if changeMode(src) != filemode.Regular { - continue - } - - // Declare the from file and the similarity index here to be able to - // reuse it inside the inner loop. The reason to not initialize them - // here is so we can skip the initialization in case they happen to - // not be needed later. They will be initialized inside the inner - // loop if and only if they're needed and reused in subsequent passes. - var from *File - var s *similarityIndex - var err error - for dstIdx, dst := range dsts { - if changeMode(dst) != filemode.Regular { - continue - } - - if dstTooLarge[dstIdx] { - continue - } - - var to *File - srcSize := srcSizes[srcIdx] - if srcSize == 0 { - from, _, err = src.Files() - if err != nil { - return nil, err - } - srcSize = from.Size + 1 - srcSizes[srcIdx] = srcSize - } - - dstSize := dstSizes[dstIdx] - if dstSize == 0 { - _, to, err = dst.Files() - if err != nil { - return nil, err - } - dstSize = to.Size + 1 - dstSizes[dstIdx] = dstSize - } - - min, max := srcSize, dstSize - if dstSize < srcSize { - min = dstSize - max = srcSize - } - - if int(min*100/max) < renameScore { - // File sizes are too different to be a match - continue - } - - if s == nil { - s, err = fileSimilarityIndex(from) - if err != nil { - if err == errIndexFull { - continue outerLoop - } - return nil, err - } - } - - if to == nil { - _, to, err = dst.Files() - if err != nil { - return nil, err - } - } - - di, err := fileSimilarityIndex(to) - if err != nil { - if err == errIndexFull { - dstTooLarge[dstIdx] = true - } - - return nil, err - } - - contentScore := s.score(di, 10000) - // The name score returns a value between 0 and 100, so we need to - // convert it to the same range as the content score. 
- nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100 - score := (contentScore*99 + nameScore*1) / 10000 - - if score < renameScore { - continue - } - - matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score}) - } - } - - sort.Stable(matrix) - - return matrix, nil -} - -func compactChanges(changes []*Change) []*Change { - var result []*Change - for _, c := range changes { - if c != nil { - result = append(result, c) - } - } - return result -} - -const ( - keyShift = 32 - maxCountValue = (1 << keyShift) - 1 -) - -var errIndexFull = errors.New("index is full") - -// similarityIndex is an index structure of lines/blocks in one file. -// This structure can be used to compute an approximation of the similarity -// between two files. -// To save space in memory, this index uses a space efficient encoding which -// will not exceed 1MiB per instance. The index starts out at a smaller size -// (closer to 2KiB), but may grow as more distinct blocks within the scanned -// file are discovered. -// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java -type similarityIndex struct { - hashed uint64 - // number of non-zero entries in hashes - numHashes int - growAt int - hashes []keyCountPair - hashBits int -} - -func fileSimilarityIndex(f *File) (*similarityIndex, error) { - idx := newSimilarityIndex() - if err := idx.hash(f); err != nil { - return nil, err - } - - sort.Stable(keyCountPairs(idx.hashes)) - - return idx, nil -} - -func newSimilarityIndex() *similarityIndex { - return &similarityIndex{ - hashBits: 8, - hashes: make([]keyCountPair, 1<<8), - growAt: shouldGrowAt(8), - } -} - -func (i *similarityIndex) hash(f *File) error { - isBin, err := f.IsBinary() - if err != nil { - return err - } - - r, err := f.Reader() - if err != nil { - return err - } - - defer ioutil.CheckClose(r, &err) - - return i.hashContent(r, f.Size, isBin) -} - -func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error { - var buf = make([]byte, 4096) - var ptr, cnt int - remaining := size - - for 0 < remaining { - hash := 5381 - var blockHashedCnt uint64 - - // Hash one line or block, whatever happens first - n := int64(0) - for { - if ptr == cnt { - ptr = 0 - var err error - cnt, err = io.ReadFull(r, buf) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if cnt == 0 { - return io.EOF - } - } - n++ - c := buf[ptr] & 0xff - ptr++ - - // Ignore CR in CRLF sequence if it's text - if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' { - continue - } - blockHashedCnt++ - - if c == '\n' { - break - } - - hash = (hash << 5) + hash + int(c) - - if n >= 64 || n >= remaining { - break - } - } - i.hashed += blockHashedCnt - if err := i.add(hash, blockHashedCnt); err != nil { - return err - } - remaining -= n - } - - return nil -} - -// score computes the similarity score between this index and another one. -// A region of a file is defined as a line in a text file or a fixed-size -// block in a binary file. To prepare an index, each region in the file is -// hashed; the values and counts of hashes are retained in a sorted table. -// Define the similarity fraction F as the count of matching regions between -// the two files divided between the maximum count of regions in either file. -// The similarity score is F multiplied by the maxScore constant, yielding a -// range [0, maxScore]. It is defined as maxScore for the degenerate case of -// two empty files. 
-// The similarity score is symmetrical; i.e. a.score(b) == b.score(a). -func (i *similarityIndex) score(other *similarityIndex, maxScore int) int { - var maxHashed = i.hashed - if maxHashed < other.hashed { - maxHashed = other.hashed - } - if maxHashed == 0 { - return maxScore - } - - return int(i.common(other) * uint64(maxScore) / maxHashed) -} - -func (i *similarityIndex) common(dst *similarityIndex) uint64 { - srcIdx, dstIdx := 0, 0 - if i.numHashes == 0 || dst.numHashes == 0 { - return 0 - } - - var common uint64 - srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key() - - for { - if srcKey == dstKey { - srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count() - if srcCnt < dstCnt { - common += srcCnt - } else { - common += dstCnt - } - - srcIdx++ - if srcIdx == len(i.hashes) { - break - } - srcKey = i.hashes[srcIdx].key() - - dstIdx++ - if dstIdx == len(dst.hashes) { - break - } - dstKey = dst.hashes[dstIdx].key() - } else if srcKey < dstKey { - // Region of src that is not in dst - srcIdx++ - if srcIdx == len(i.hashes) { - break - } - srcKey = i.hashes[srcIdx].key() - } else { - // Region of dst that is not in src - dstIdx++ - if dstIdx == len(dst.hashes) { - break - } - dstKey = dst.hashes[dstIdx].key() - } - } - - return common -} - -func (i *similarityIndex) add(key int, cnt uint64) error { - key = int(uint32(key) * 0x9e370001 >> 1) - - j := i.slot(key) - for { - v := i.hashes[j] - if v == 0 { - // It's an empty slot, so we can store it here. - if i.growAt <= i.numHashes { - if err := i.grow(); err != nil { - return err - } - j = i.slot(key) - continue - } - - var err error - i.hashes[j], err = newKeyCountPair(key, cnt) - if err != nil { - return err - } - i.numHashes++ - return nil - } else if v.key() == key { - // It's the same key, so increment the counter. - var err error - i.hashes[j], err = newKeyCountPair(key, v.count()+cnt) - if err != nil { - return err - } - return nil - } else if j+1 >= len(i.hashes) { - j = 0 - } else { - j++ - } - } -} - -type keyCountPair uint64 - -func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) { - if cnt > maxCountValue { - return 0, errIndexFull - } - - return keyCountPair((uint64(key) << keyShift) | cnt), nil -} - -func (p keyCountPair) key() int { - return int(p >> keyShift) -} - -func (p keyCountPair) count() uint64 { - return uint64(p) & maxCountValue -} - -func (i *similarityIndex) slot(key int) int { - // We use 31 - hashBits because the upper bit was already forced - // to be 0 and we want the remaining high bits to be used as the - // table slot. - return int(uint32(key) >> uint(31-i.hashBits)) -} - -func shouldGrowAt(hashBits int) int { - return (1 << uint(hashBits)) * (hashBits - 3) / hashBits -} - -func (i *similarityIndex) grow() error { - if i.hashBits == 30 { - return errIndexFull - } - - old := i.hashes - - i.hashBits++ - i.growAt = shouldGrowAt(i.hashBits) - - // TODO(erizocosmico): find a way to check if it will OOM and return - // errIndexFull instead. 
- i.hashes = make([]keyCountPair, 1<<uint(i.hashBits)) - - for _, v := range old { - if v != 0 { - j := i.slot(v.key()) - for i.hashes[j] != 0 { - j++ - if j >= len(i.hashes) { - j = 0 - } - } - i.hashes[j] = v - } - } - - return nil -} - -type keyCountPairs []keyCountPair - -func (p keyCountPairs) Len() int { return len(p) } -func (p keyCountPairs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] } diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go deleted file mode 100644 index 216010d913b..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go +++ /dev/null @@ -1,357 +0,0 @@ -package object - -import ( - "bufio" - "bytes" - "fmt" - "io" - stdioutil "io/ioutil" - "strings" - - "github.com/ProtonMail/go-crypto/openpgp" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// Tag represents an annotated tag object. It points to a single git object of -// any type, but tags typically are applied to commit or blob objects. It -// provides a reference that associates the target with a tag name. It also -// contains meta-information about the tag, including the tagger, tag date and -// message. -// -// Note that this is not used for lightweight tags. -// -// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags -type Tag struct { - // Hash of the tag. - Hash plumbing.Hash - // Name of the tag. - Name string - // Tagger is the one who created the tag. - Tagger Signature - // Message is an arbitrary text message. - Message string - // PGPSignature is the PGP signature of the tag. - PGPSignature string - // TargetType is the object type of the target. - TargetType plumbing.ObjectType - // Target is the hash of the target object. - Target plumbing.Hash - - s storer.EncodedObjectStorer -} - -// GetTag gets a tag from an object storer and decodes it. -func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) { - o, err := s.EncodedObject(plumbing.TagObject, h) - if err != nil { - return nil, err - } - - return DecodeTag(s, o) -} - -// DecodeTag decodes an encoded object into a *Tag and associates it to the -// given object storer. -func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) { - t := &Tag{s: s} - if err := t.Decode(o); err != nil { - return nil, err - } - - return t, nil -} - -// ID returns the object ID of the tag, not the object that the tag references. -// The returned value will always match the current value of Tag.Hash. -// -// ID is present to fulfill the Object interface. -func (t *Tag) ID() plumbing.Hash { - return t.Hash -} - -// Type returns the type of object. It always returns plumbing.TagObject. -// -// Type is present to fulfill the Object interface. -func (t *Tag) Type() plumbing.ObjectType { - return plumbing.TagObject -} - -// Decode transforms a plumbing.EncodedObject into a Tag struct.
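The Decode method below is what ultimately backs higher-level tag lookups. A sketch of reading an annotated tag through the public API; the repository path and hash are placeholders:

```go
package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	repo, err := git.PlainOpen("/path/to/repo") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// TagObject fetches the encoded object and runs the Decode logic below.
	tag, err := repo.TagObject(plumbing.NewHash("0123456789abcdef0123456789abcdef01234567"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s tagged %s by %s\n", tag.Name, tag.Target, tag.Tagger.Name)
}
```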
-func (t *Tag) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.TagObject { - return ErrUnsupportedObject - } - - t.Hash = o.Hash() - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - for { - var line []byte - line, err = r.ReadBytes('\n') - if err != nil && err != io.EOF { - return err - } - - line = bytes.TrimSpace(line) - if len(line) == 0 { - break // Start of message - } - - split := bytes.SplitN(line, []byte{' '}, 2) - switch string(split[0]) { - case "object": - t.Target = plumbing.NewHash(string(split[1])) - case "type": - t.TargetType, err = plumbing.ParseObjectType(string(split[1])) - if err != nil { - return err - } - case "tag": - t.Name = string(split[1]) - case "tagger": - t.Tagger.Decode(split[1]) - } - - if err == io.EOF { - return nil - } - } - - data, err := stdioutil.ReadAll(r) - if err != nil { - return err - } - - var pgpsig bool - // Check if data contains PGP signature. - if bytes.Contains(data, []byte(beginpgp)) { - // Split the lines at newline. - messageAndSig := bytes.Split(data, []byte("\n")) - - for _, l := range messageAndSig { - if pgpsig { - if bytes.Contains(l, []byte(endpgp)) { - t.PGPSignature += endpgp + "\n" - break - } else { - t.PGPSignature += string(l) + "\n" - } - continue - } - - // Check if it's the beginning of a PGP signature. - if bytes.Contains(l, []byte(beginpgp)) { - t.PGPSignature += beginpgp + "\n" - pgpsig = true - continue - } - - t.Message += string(l) + "\n" - } - } else { - t.Message = string(data) - } - - return nil -} - -// Encode transforms a Tag into a plumbing.EncodedObject. -func (t *Tag) Encode(o plumbing.EncodedObject) error { - return t.encode(o, true) -} - -// EncodeWithoutSignature export a Tag into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). -func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error { - return t.encode(o, false) -} - -func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) { - o.SetType(plumbing.TagObject) - w, err := o.Writer() - if err != nil { - return err - } - defer ioutil.CheckClose(w, &err) - - if _, err = fmt.Fprintf(w, - "object %s\ntype %s\ntag %s\ntagger ", - t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil { - return err - } - - if err = t.Tagger.Encode(w); err != nil { - return err - } - - if _, err = fmt.Fprint(w, "\n\n"); err != nil { - return err - } - - if _, err = fmt.Fprint(w, t.Message); err != nil { - return err - } - - // Note that this is highly sensitive to what it sent along in the message. - // Message *always* needs to end with a newline, or else the message and the - // signature will be concatenated into a corrupt object. Since this is a - // lower-level method, we assume you know what you are doing and have already - // done the needful on the message in the caller. - if includeSig { - if _, err = fmt.Fprint(w, t.PGPSignature); err != nil { - return err - } - } - - return err -} - -// Commit returns the commit pointed to by the tag. If the tag points to a -// different type of object ErrUnsupportedObject will be returned. 
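Commit, Tree and Blob below are typed dereference helpers; chained together they let a caller walk from an annotated tag down to the files it covers. A small sketch, where describeTag is an illustrative name:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// describeTag follows an annotated tag to its commit and root tree.
func describeTag(t *object.Tag) error {
	c, err := t.Commit() // ErrUnsupportedObject unless the target is a commit
	if err != nil {
		return err
	}
	tree, err := c.Tree()
	if err != nil {
		return err
	}
	fmt.Printf("tag %s -> commit %s (%d root entries)\n", t.Name, c.Hash, len(tree.Entries))
	return nil
}
```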
-func (t *Tag) Commit() (*Commit, error) { - if t.TargetType != plumbing.CommitObject { - return nil, ErrUnsupportedObject - } - - o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target) - if err != nil { - return nil, err - } - - return DecodeCommit(t.s, o) -} - -// Tree returns the tree pointed to by the tag. If the tag points to a commit -// object the tree of that commit will be returned. If the tag does not point -// to a commit or tree object ErrUnsupportedObject will be returned. -func (t *Tag) Tree() (*Tree, error) { - switch t.TargetType { - case plumbing.CommitObject: - c, err := t.Commit() - if err != nil { - return nil, err - } - - return c.Tree() - case plumbing.TreeObject: - return GetTree(t.s, t.Target) - default: - return nil, ErrUnsupportedObject - } -} - -// Blob returns the blob pointed to by the tag. If the tag points to a -// different type of object ErrUnsupportedObject will be returned. -func (t *Tag) Blob() (*Blob, error) { - if t.TargetType != plumbing.BlobObject { - return nil, ErrUnsupportedObject - } - - return GetBlob(t.s, t.Target) -} - -// Object returns the object pointed to by the tag. -func (t *Tag) Object() (Object, error) { - o, err := t.s.EncodedObject(t.TargetType, t.Target) - if err != nil { - return nil, err - } - - return DecodeObject(t.s, o) -} - -// String returns the meta information contained in the tag as a formatted -// string. -func (t *Tag) String() string { - obj, _ := t.Object() - - return fmt.Sprintf( - "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s", - plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat), - t.Message, objectAsString(obj), - ) -} - -// Verify performs PGP verification of the tag with a provided armored -// keyring and returns openpgp.Entity associated with verifying key on success. -func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) { - keyRingReader := strings.NewReader(armoredKeyRing) - keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) - if err != nil { - return nil, err - } - - // Extract signature. - signature := strings.NewReader(t.PGPSignature) - - encoded := &plumbing.MemoryObject{} - // Encode tag components, excluding signature and get a reader object. - if err := t.EncodeWithoutSignature(encoded); err != nil { - return nil, err - } - er, err := encoded.Reader() - if err != nil { - return nil, err - } - - return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) -} - -// TagIter provides an iterator for a set of tags. -type TagIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewTagIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *TagIter that iterates over all -// tags contained in the storer.EncodedObjectIter. -// -// Any non-tag object returned by the storer.EncodedObjectIter is skipped. -func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter { - return &TagIter{iter, s} -} - -// Next moves the iterator to the next tag and returns a pointer to it. If -// there are no more tags, it returns io.EOF. -func (iter *TagIter) Next() (*Tag, error) { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - return DecodeTag(iter.s, obj) -} - -// ForEach call the cb function for each tag contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
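The iterator's ForEach below is typically reached through Repository.TagObjects. A sketch, assuming repo is an already-opened *git.Repository:

```go
package example

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

// listTags prints every annotated tag object in the repository.
func listTags(repo *git.Repository) error {
	iter, err := repo.TagObjects()
	if err != nil {
		return err
	}
	return iter.ForEach(func(t *object.Tag) error {
		fmt.Println(t.Name, "->", t.Target)
		return nil // returning storer.ErrStop here would stop early without an error
	})
}
```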
-func (iter *TagIter) ForEach(cb func(*Tag) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - t, err := DecodeTag(iter.s, obj) - if err != nil { - return err - } - - return cb(t) - }) -} - -func objectAsString(obj Object) string { - switch o := obj.(type) { - case *Commit: - return o.String() - default: - return "" - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go deleted file mode 100644 index 5e6378ca499..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ /dev/null @@ -1,525 +0,0 @@ -package object - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "path" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - maxTreeDepth = 1024 - startingStackSize = 8 -) - -// New errors defined by this package. -var ( - ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") - ErrFileNotFound = errors.New("file not found") - ErrDirectoryNotFound = errors.New("directory not found") - ErrEntryNotFound = errors.New("entry not found") -) - -// Tree is basically like a directory - it references a bunch of other trees -// and/or blobs (i.e. files and sub-directories) -type Tree struct { - Entries []TreeEntry - Hash plumbing.Hash - - s storer.EncodedObjectStorer - m map[string]*TreeEntry - t map[string]*Tree // tree path cache -} - -// GetTree gets a tree from an object storer and decodes it. -func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { - o, err := s.EncodedObject(plumbing.TreeObject, h) - if err != nil { - return nil, err - } - - return DecodeTree(s, o) -} - -// DecodeTree decodes an encoded object into a *Tree and associates it to the -// given object storer. -func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { - t := &Tree{s: s} - if err := t.Decode(o); err != nil { - return nil, err - } - - return t, nil -} - -// TreeEntry represents a file -type TreeEntry struct { - Name string - Mode filemode.FileMode - Hash plumbing.Hash -} - -// File returns the hash of the file identified by the `path` argument. -// The path is interpreted as relative to the tree receiver. -func (t *Tree) File(path string) (*File, error) { - e, err := t.FindEntry(path) - if err != nil { - return nil, ErrFileNotFound - } - - blob, err := GetBlob(t.s, e.Hash) - if err != nil { - if err == plumbing.ErrObjectNotFound { - return nil, ErrFileNotFound - } - return nil, err - } - - return NewFile(path, e.Mode, blob), nil -} - -// Size returns the plaintext size of an object, without reading it -// into memory. -func (t *Tree) Size(path string) (int64, error) { - e, err := t.FindEntry(path) - if err != nil { - return 0, ErrEntryNotFound - } - - return t.s.EncodedObjectSize(e.Hash) -} - -// Tree returns the tree identified by the `path` argument. -// The path is interpreted as relative to the tree receiver. -func (t *Tree) Tree(path string) (*Tree, error) { - e, err := t.FindEntry(path) - if err != nil { - return nil, ErrDirectoryNotFound - } - - tree, err := GetTree(t.s, e.Hash) - if err == plumbing.ErrObjectNotFound { - return nil, ErrDirectoryNotFound - } - - return tree, err -} - -// TreeEntryFile returns the *File for a given *TreeEntry. 
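TreeEntryFile and FindEntry below are the plumbing behind Tree.File. The usual caller-side flow looks roughly like this; the path is a placeholder:

```go
package example

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// printFile resolves a path inside a tree and dumps its contents.
func printFile(tree *object.Tree) error {
	f, err := tree.File("docs/README.md") // placeholder path, relative to the tree
	if err != nil {
		return err // object.ErrFileNotFound if the path does not resolve
	}
	s, err := f.Contents()
	if err != nil {
		return err
	}
	fmt.Println(s)
	return nil
}
```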
-func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) { - blob, err := GetBlob(t.s, e.Hash) - if err != nil { - return nil, err - } - - return NewFile(e.Name, e.Mode, blob), nil -} - -// FindEntry search a TreeEntry in this tree or any subtree. -func (t *Tree) FindEntry(path string) (*TreeEntry, error) { - if t.t == nil { - t.t = make(map[string]*Tree) - } - - pathParts := strings.Split(path, "/") - startingTree := t - pathCurrent := "" - - // search for the longest path in the tree path cache - for i := len(pathParts) - 1; i > 1; i-- { - path := filepath.Join(pathParts[:i]...) - - tree, ok := t.t[path] - if ok { - startingTree = tree - pathParts = pathParts[i:] - pathCurrent = path - - break - } - } - - var tree *Tree - var err error - for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] { - if tree, err = tree.dir(pathParts[0]); err != nil { - return nil, err - } - - pathCurrent = filepath.Join(pathCurrent, pathParts[0]) - t.t[pathCurrent] = tree - } - - return tree.entry(pathParts[0]) -} - -func (t *Tree) dir(baseName string) (*Tree, error) { - entry, err := t.entry(baseName) - if err != nil { - return nil, ErrDirectoryNotFound - } - - obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash) - if err != nil { - return nil, err - } - - tree := &Tree{s: t.s} - err = tree.Decode(obj) - - return tree, err -} - -func (t *Tree) entry(baseName string) (*TreeEntry, error) { - if t.m == nil { - t.buildMap() - } - - entry, ok := t.m[baseName] - if !ok { - return nil, ErrEntryNotFound - } - - return entry, nil -} - -// Files returns a FileIter allowing to iterate over the Tree -func (t *Tree) Files() *FileIter { - return NewFileIter(t.s, t) -} - -// ID returns the object ID of the tree. The returned value will always match -// the current value of Tree.Hash. -// -// ID is present to fulfill the Object interface. -func (t *Tree) ID() plumbing.Hash { - return t.Hash -} - -// Type returns the type of object. It always returns plumbing.TreeObject. -func (t *Tree) Type() plumbing.ObjectType { - return plumbing.TreeObject -} - -// Decode transform an plumbing.EncodedObject into a Tree struct -func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { - if o.Type() != plumbing.TreeObject { - return ErrUnsupportedObject - } - - t.Hash = o.Hash() - if o.Size() == 0 { - return nil - } - - t.Entries = nil - t.m = nil - - reader, err := o.Reader() - if err != nil { - return err - } - defer ioutil.CheckClose(reader, &err) - - r := bufPool.Get().(*bufio.Reader) - defer bufPool.Put(r) - r.Reset(reader) - for { - str, err := r.ReadString(' ') - if err != nil { - if err == io.EOF { - break - } - - return err - } - str = str[:len(str)-1] // strip last byte (' ') - - mode, err := filemode.New(str) - if err != nil { - return err - } - - name, err := r.ReadString(0) - if err != nil && err != io.EOF { - return err - } - - var hash plumbing.Hash - if _, err = io.ReadFull(r, hash[:]); err != nil { - return err - } - - baseName := name[:len(name)-1] - t.Entries = append(t.Entries, TreeEntry{ - Hash: hash, - Mode: mode, - Name: baseName, - }) - } - - return nil -} - -// Encode transforms a Tree into a plumbing.EncodedObject. 
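Decode above and Encode below are inverses over the raw `mode SP name NUL hash` entry format. A round-trip sketch using an in-memory encoded object, assuming the tree was obtained from the same library:

```go
package example

import (
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/storage/memory"
)

// roundTrip re-encodes a tree and decodes it back out.
func roundTrip(tree *object.Tree) (*object.Tree, error) {
	obj := &plumbing.MemoryObject{}
	if err := tree.Encode(obj); err != nil {
		return nil, err
	}
	// A fresh in-memory storer is enough to hold the decoded entries.
	return object.DecodeTree(memory.NewStorage(), obj)
}
```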
-func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { - o.SetType(plumbing.TreeObject) - w, err := o.Writer() - if err != nil { - return err - } - - defer ioutil.CheckClose(w, &err) - for _, entry := range t.Entries { - if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { - return err - } - - if _, err = w.Write([]byte{0x00}); err != nil { - return err - } - - if _, err = w.Write(entry.Hash[:]); err != nil { - return err - } - } - - return err -} - -func (t *Tree) buildMap() { - t.m = make(map[string]*TreeEntry) - for i := 0; i < len(t.Entries); i++ { - t.m[t.Entries[i].Name] = &t.Entries[i] - } -} - -// Diff returns a list of changes between this tree and the provided one -func (t *Tree) Diff(to *Tree) (Changes, error) { - return t.DiffContext(context.Background(), to) -} - -// DiffContext returns a list of changes between this tree and the provided one -// Error will be returned if context expires. Provided context must be non nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) { - return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions) -} - -// Patch returns a slice of Patch objects with all the changes between trees -// in chunks. This representation can be used to create several diff outputs. -func (t *Tree) Patch(to *Tree) (*Patch, error) { - return t.PatchContext(context.Background(), to) -} - -// PatchContext returns a slice of Patch objects with all the changes between -// trees in chunks. This representation can be used to create several diff -// outputs. If context expires, an error will be returned. Provided context must -// be non-nil. -// -// NOTE: Since version 5.1.0 the renames are correctly handled, the settings -// used are the recommended options DefaultDiffTreeOptions. -func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) { - changes, err := t.DiffContext(ctx, to) - if err != nil { - return nil, err - } - - return changes.PatchContext(ctx) -} - -// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. -type treeEntryIter struct { - t *Tree - pos int -} - -func (iter *treeEntryIter) Next() (TreeEntry, error) { - if iter.pos >= len(iter.t.Entries) { - return TreeEntry{}, io.EOF - } - iter.pos++ - return iter.t.Entries[iter.pos-1], nil -} - -// TreeWalker provides a means of walking through all of the entries in a Tree. -type TreeWalker struct { - stack []*treeEntryIter - base string - recursive bool - seen map[plumbing.Hash]bool - - s storer.EncodedObjectStorer - t *Tree -} - -// NewTreeWalker returns a new TreeWalker for the given tree. -// -// It is the caller's responsibility to call Close() when finished with the -// tree walker. -func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker { - stack := make([]*treeEntryIter, 0, startingStackSize) - stack = append(stack, &treeEntryIter{t, 0}) - - return &TreeWalker{ - stack: stack, - recursive: recursive, - seen: seen, - - s: t.s, - t: t, - } -} - -// Next returns the next object from the tree. Objects are returned in order -// and subtrees are included. After the last object has been returned further -// calls to Next() will return io.EOF. -// -// In the current implementation any objects which cannot be found in the -// underlying repository will be skipped automatically. It is possible that this -// may change in future versions. 
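The Next loop below is driven from the outside like any io.EOF-terminated iterator. A sketch of a full recursive walk over a tree:

```go
package example

import (
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing/object"
)

// walk lists every entry reachable from the tree, subtrees included.
func walk(tree *object.Tree) error {
	walker := object.NewTreeWalker(tree, true, nil) // recursive, no seen-set
	defer walker.Close()
	for {
		name, entry, err := walker.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(entry.Mode, name)
	}
}
```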
-func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) { - var obj *Tree - for { - current := len(w.stack) - 1 - if current < 0 { - // Nothing left on the stack so we're finished - err = io.EOF - return - } - - if current > maxTreeDepth { - // We're probably following bad data or some self-referencing tree - err = ErrMaxTreeDepth - return - } - - entry, err = w.stack[current].Next() - if err == io.EOF { - // Finished with the current tree, move back up to the parent - w.stack = w.stack[:current] - w.base, _ = path.Split(w.base) - w.base = strings.TrimSuffix(w.base, "/") - continue - } - - if err != nil { - return - } - - if w.seen[entry.Hash] { - continue - } - - if entry.Mode == filemode.Dir { - obj, err = GetTree(w.s, entry.Hash) - } - - name = simpleJoin(w.base, entry.Name) - - if err != nil { - err = io.EOF - return - } - - break - } - - if !w.recursive { - return - } - - if obj != nil { - w.stack = append(w.stack, &treeEntryIter{obj, 0}) - w.base = simpleJoin(w.base, entry.Name) - } - - return -} - -// Tree returns the tree that the tree walker most recently operated on. -func (w *TreeWalker) Tree() *Tree { - current := len(w.stack) - 1 - if w.stack[current].pos == 0 { - current-- - } - - if current < 0 { - return nil - } - - return w.stack[current].t -} - -// Close releases any resources used by the TreeWalker. -func (w *TreeWalker) Close() { - w.stack = nil -} - -// TreeIter provides an iterator for a set of trees. -type TreeIter struct { - storer.EncodedObjectIter - s storer.EncodedObjectStorer -} - -// NewTreeIter takes a storer.EncodedObjectStorer and a -// storer.EncodedObjectIter and returns a *TreeIter that iterates over all -// tree contained in the storer.EncodedObjectIter. -// -// Any non-tree object returned by the storer.EncodedObjectIter is skipped. -func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter { - return &TreeIter{iter, s} -} - -// Next moves the iterator to the next tree and returns a pointer to it. If -// there are no more trees, it returns io.EOF. -func (iter *TreeIter) Next() (*Tree, error) { - for { - obj, err := iter.EncodedObjectIter.Next() - if err != nil { - return nil, err - } - - if obj.Type() != plumbing.TreeObject { - continue - } - - return DecodeTree(iter.s, obj) - } -} - -// ForEach call the cb function for each tree contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *TreeIter) ForEach(cb func(*Tree) error) error { - return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { - if obj.Type() != plumbing.TreeObject { - return nil - } - - t, err := DecodeTree(iter.s, obj) - if err != nil { - return err - } - - return cb(t) - }) -} - -func simpleJoin(parent, child string) string { - if len(parent) > 0 { - return parent + "/" + child - } - return child -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go deleted file mode 100644 index b4891b957c5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ /dev/null @@ -1,136 +0,0 @@ -package object - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// A treenoder is a helper type that wraps git trees into merkletrie -// noders. 
-// - -// As a merkletrie noder doesn't understand the concept of modes (e.g. -// file permissions), the treenoder includes the mode of the git tree in -// the hash, so changes in the modes will be detected as modifications -// to the file contents by the merkletrie difftree algorithm. This is -// consistent with how the "git diff-tree" command works. -type treeNoder struct { - parent *Tree // the root node is its own parent - name string // empty string for the root node - mode filemode.FileMode - hash plumbing.Hash - children []noder.Noder // memoized -} - -// NewTreeRootNode returns the root node of a Tree -func NewTreeRootNode(t *Tree) noder.Noder { - if t == nil { - return &treeNoder{} - } - - return &treeNoder{ - parent: t, - name: "", - mode: filemode.Dir, - hash: t.Hash, - } -} - -func (t *treeNoder) isRoot() bool { - return t.name == "" -} - -func (t *treeNoder) String() string { - return "treeNoder <" + t.name + ">" -} - -func (t *treeNoder) Hash() []byte { - if t.mode == filemode.Deprecated { - return append(t.hash[:], filemode.Regular.Bytes()...) - } - return append(t.hash[:], t.mode.Bytes()...) -} - -func (t *treeNoder) Name() string { - return t.name -} - -func (t *treeNoder) IsDir() bool { - return t.mode == filemode.Dir -} - -// Children will return the children of a treenoder as treenoders, -// building them from the children of the wrapped git tree. -func (t *treeNoder) Children() ([]noder.Noder, error) { - if t.mode != filemode.Dir { - return noder.NoChildren, nil - } - - // children are memoized for efficiency - if t.children != nil { - return t.children, nil - } - - // the parent of the returned children will be ourself as a tree if - // we are not the root treenoder. The root is special as it - // is its own parent. - parent := t.parent - if !t.isRoot() { - var err error - if parent, err = t.parent.Tree(t.name); err != nil { - return nil, err - } - } - - return transformChildren(parent) -} - -// Returns the children of a tree as treenoders. -// Efficiency is key here. -func transformChildren(t *Tree) ([]noder.Noder, error) { - var err error - var e TreeEntry - - // there will be more tree entries than children in the tree, - // due to submodules and empty directories, but I think it is still - // worth it to pre-allocate the whole array now, even if sometimes - // it is bigger than needed. - ret := make([]noder.Noder, 0, len(t.Entries)) - - walker := NewTreeWalker(t, false, nil) // don't recurse - // don't defer walker.Close() for efficiency reasons. - for { - _, e, err = walker.Next() - if err == io.EOF { - break - } - if err != nil { - walker.Close() - return nil, err - } - - ret = append(ret, &treeNoder{ - parent: t, - name: e.Name, - mode: e.Mode, - hash: e.Hash, - }) - } - walker.Close() - - return ret, nil -} - -// len(t.tree.Entries) != the number of elements walked by treewalker -// for some reason because of empty directories, submodules, etc, so we -// have to walk here.
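These noders exist so trees can be fed to the generic merkletrie differ, which is essentially what object.DiffTree does internally. A sketch of wiring them up directly, under the assumption that the merkletrie package's DiffTree/noder.Equal signatures are as used elsewhere in this vendored tree:

```go
package example

import (
	"bytes"

	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/utils/merkletrie"
	"github.com/go-git/go-git/v5/utils/merkletrie/noder"
)

// rawDiff wraps two trees as merkletrie noders and diffs them.
func rawDiff(a, b *object.Tree) (merkletrie.Changes, error) {
	// Mode is baked into Hash() above, so equality covers content and mode.
	hashEqual := func(x, y noder.Hasher) bool {
		return bytes.Equal(x.Hash(), y.Hash())
	}
	return merkletrie.DiffTree(object.NewTreeRootNode(a), object.NewTreeRootNode(b), hashEqual)
}
```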
-func (t *treeNoder) NumChildren() (int, error) { - children, err := t.Children() - if err != nil { - return 0, err - } - - return len(children), nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go deleted file mode 100644 index 1bd724cad5b..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go +++ /dev/null @@ -1,211 +0,0 @@ -package packp - -import ( - "fmt" - "sort" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/memory" -) - -// AdvRefs values represent the information transmitted on an -// advertised-refs message. Values from this type are not zero-value -// safe, use the New function instead. -type AdvRefs struct { - // Prefix stores prefix payloads. - // - // When using this message over (smart) HTTP, you have to add a pktline - // before the whole thing with the following payload: - // - // '# service=$servicename" LF - // - // Moreover, some (all) git HTTP smart servers will send a flush-pkt - // just after the first pkt-line. - // - // To accommodate both situations, the Prefix field allow you to store - // any data you want to send before the actual pktlines. It will also - // be filled up with whatever is found on the line. - Prefix [][]byte - // Head stores the resolved HEAD reference if present. - // This can be present with git-upload-pack, not with git-receive-pack. - Head *plumbing.Hash - // Capabilities are the capabilities. - Capabilities *capability.List - // References are the hash references. - References map[string]plumbing.Hash - // Peeled are the peeled hash references. - Peeled map[string]plumbing.Hash - // Shallows are the shallow object ids. - Shallows []plumbing.Hash -} - -// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used. -func NewAdvRefs() *AdvRefs { - return &AdvRefs{ - Prefix: [][]byte{}, - Capabilities: capability.NewList(), - References: make(map[string]plumbing.Hash), - Peeled: make(map[string]plumbing.Hash), - Shallows: []plumbing.Hash{}, - } -} - -func (a *AdvRefs) AddReference(r *plumbing.Reference) error { - switch r.Type() { - case plumbing.SymbolicReference: - v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String()) - a.Capabilities.Add(capability.SymRef, v) - case plumbing.HashReference: - a.References[r.Name().String()] = r.Hash() - default: - return plumbing.ErrInvalidType - } - - return nil -} - -func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) { - s := memory.ReferenceStorage{} - if err := a.addRefs(s); err != nil { - return s, plumbing.NewUnexpectedError(err) - } - - return s, nil -} - -func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error { - for name, hash := range a.References { - ref := plumbing.NewReferenceFromStrings(name, hash.String()) - if err := s.SetReference(ref); err != nil { - return err - } - } - - if a.supportSymrefs() { - return a.addSymbolicRefs(s) - } - - return a.resolveHead(s) -} - -// If the server does not support symrefs capability, -// we need to guess the reference where HEAD is pointing to. -// -// Git versions prior to 1.8.4.3 has an special procedure to get -// the reference where is pointing to HEAD: -// - Check if a reference called master exists. 
If it exists and -// has the same hash as HEAD, we can say that HEAD is pointing to master -// - If master does not exist or does not have the same hash as HEAD, -// sort the references and check, in that order, whether any reference has -// the same hash as HEAD. If so, set HEAD to point to that branch -// - If no reference is found, return an error -func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error { - if a.Head == nil { - return nil - } - - ref, err := s.Reference(plumbing.Master) - - // check first if HEAD is pointing to master - if err == nil { - ok, err := a.createHeadIfCorrectReference(ref, s) - if err != nil { - return err - } - - if ok { - return nil - } - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - // From here we are trying to guess the branch that HEAD is pointing to - refIter, err := s.IterReferences() - if err != nil { - return err - } - - var refNames []string - err = refIter.ForEach(func(r *plumbing.Reference) error { - refNames = append(refNames, string(r.Name())) - return nil - }) - if err != nil { - return err - } - - sort.Strings(refNames) - - var headSet bool - for _, refName := range refNames { - ref, err := s.Reference(plumbing.ReferenceName(refName)) - if err != nil { - return err - } - ok, err := a.createHeadIfCorrectReference(ref, s) - if err != nil { - return err - } - if ok { - headSet = true - break - } - } - - if !headSet { - return plumbing.ErrReferenceNotFound - } - - return nil -} - -func (a *AdvRefs) createHeadIfCorrectReference( - reference *plumbing.Reference, - s storer.ReferenceStorer) (bool, error) { - if reference.Hash() == *a.Head { - headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name()) - if err := s.SetReference(headRef); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error { - for _, symref := range a.Capabilities.Get(capability.SymRef) { - chunks := strings.Split(symref, ":") - if len(chunks) != 2 { - err := fmt.Errorf("bad number of `:` in symref value (%q)", symref) - return plumbing.NewUnexpectedError(err) - } - name := plumbing.ReferenceName(chunks[0]) - target := plumbing.ReferenceName(chunks[1]) - ref := plumbing.NewSymbolicReference(name, target) - if err := s.SetReference(ref); err != nil { - return nil - } - } - - return nil -} - -func (a *AdvRefs) supportSymrefs() bool { - return a.Capabilities.Supports(capability.SymRef) -} - -// IsEmpty returns true if it doesn't contain any reference. -func (a *AdvRefs) IsEmpty() bool { - return a.Head == nil && - len(a.References) == 0 && - len(a.Peeled) == 0 && - len(a.Shallows) == 0 -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go deleted file mode 100644 index 63bbe5ab16e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go +++ /dev/null @@ -1,288 +0,0 @@ -package packp - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Decode reads the next advertised-refs message from its input and -// stores it in the AdvRefs.
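The state-machine decoder that follows is driven entirely through this Decode entry point. A sketch of consuming an advertised-refs message, assuming r carries the raw pkt-lines (for example, the body of a smart-HTTP /info/refs response):

```go
package example

import (
	"io"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

// readAdvRefs parses an advertised-refs message as sent by
// git-upload-pack and returns the plain (non-peeled) references.
func readAdvRefs(r io.Reader) (map[string]plumbing.Hash, error) {
	ar := packp.NewAdvRefs()
	if err := ar.Decode(r); err != nil {
		return nil, err
	}
	return ar.References, nil
}
```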
-func (a *AdvRefs) Decode(r io.Reader) error { - d := newAdvRefsDecoder(r) - return d.Decode(a) -} - -type advRefsDecoder struct { - s *pktline.Scanner // a pkt-line scanner from the input stream - line []byte // current pkt-line contents, use parser.nextLine() to make it advance - nLine int // current pkt-line number for debugging, begins at 1 - hash plumbing.Hash // last hash read - err error // sticky error, use the parser.error() method to fill this out - data *AdvRefs // parsed data is stored here -} - -var ( - // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised - // references message. - ErrEmptyAdvRefs = errors.New("empty advertised-ref message") - // ErrEmptyInput is returned by Decode if the input is empty. - ErrEmptyInput = errors.New("empty input") -) - -func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { - return &advRefsDecoder{ - s: pktline.NewScanner(r), - } -} - -func (d *advRefsDecoder) Decode(v *AdvRefs) error { - d.data = v - - for state := decodePrefix; state != nil; { - state = state(d) - } - - return d.err -} - -type decoderStateFn func(*advRefsDecoder) decoderStateFn - -// fills out the parser sticky error -func (d *advRefsDecoder) error(format string, a ...interface{}) { - msg := fmt.Sprintf( - "pkt-line %d: %s", d.nLine, - fmt.Sprintf(format, a...), - ) - - d.err = NewErrUnexpectedData(msg, d.line) -} - -// Reads a new pkt-line from the scanner, makes its payload available as -// p.line and increments p.nLine. A successful invocation returns true, -// otherwise, false is returned and the sticky error is filled out -// accordingly. Trims eols at the end of the payloads. -func (d *advRefsDecoder) nextLine() bool { - d.nLine++ - - if !d.s.Scan() { - if d.err = d.s.Err(); d.err != nil { - return false - } - - if d.nLine == 1 { - d.err = ErrEmptyInput - return false - } - - d.error("EOF") - return false - } - - d.line = d.s.Bytes() - d.line = bytes.TrimSuffix(d.line, eol) - - return true -} - -// The HTTP smart prefix is often followed by a flush-pkt. -func decodePrefix(d *advRefsDecoder) decoderStateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if !isPrefix(d.line) { - return decodeFirstHash - } - - tmp := make([]byte, len(d.line)) - copy(tmp, d.line) - d.data.Prefix = append(d.data.Prefix, tmp) - if ok := d.nextLine(); !ok { - return nil - } - - if !isFlush(d.line) { - return decodeFirstHash - } - - d.data.Prefix = append(d.data.Prefix, pktline.Flush) - if ok := d.nextLine(); !ok { - return nil - } - - return decodeFirstHash -} - -func isPrefix(payload []byte) bool { - return len(payload) > 0 && payload[0] == '#' -} - -// If the first hash is zero, then a no-refs is coming. Otherwise, a -// list-of-refs is coming, and the hash will be followed by the first -// advertised ref. -func decodeFirstHash(p *advRefsDecoder) decoderStateFn { - // If the repository is empty, we receive a flush here (HTTP). 
- if isFlush(p.line) { - p.err = ErrEmptyAdvRefs - return nil - } - - if len(p.line) < hashSize { - p.error("cannot read hash, pkt-line too short") - return nil - } - - if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil { - p.error("invalid hash text: %s", err) - return nil - } - - p.line = p.line[hashSize:] - - if p.hash.IsZero() { - return decodeSkipNoRefs - } - - return decodeFirstRef -} - -// Skips SP "capabilities^{}" NUL -func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn { - if len(p.line) < len(noHeadMark) { - p.error("too short zero-id ref") - return nil - } - - if !bytes.HasPrefix(p.line, noHeadMark) { - p.error("malformed zero-id ref") - return nil - } - - p.line = p.line[len(noHeadMark):] - - return decodeCaps -} - -// decode the refname, expects SP refname NULL -func decodeFirstRef(l *advRefsDecoder) decoderStateFn { - if len(l.line) < 3 { - l.error("line too short after hash") - return nil - } - - if !bytes.HasPrefix(l.line, sp) { - l.error("no space after hash") - return nil - } - l.line = l.line[1:] - - chunks := bytes.SplitN(l.line, null, 2) - if len(chunks) < 2 { - l.error("NULL not found") - return nil - } - ref := chunks[0] - l.line = chunks[1] - - if bytes.Equal(ref, []byte(head)) { - l.data.Head = &l.hash - } else { - l.data.References[string(ref)] = l.hash - } - - return decodeCaps -} - -func decodeCaps(p *advRefsDecoder) decoderStateFn { - if err := p.data.Capabilities.Decode(p.line); err != nil { - p.error("invalid capabilities: %s", err) - return nil - } - - return decodeOtherRefs -} - -// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}). -// If there are no refs, then there might be a shallow or flush-ptk. -func decodeOtherRefs(p *advRefsDecoder) decoderStateFn { - if ok := p.nextLine(); !ok { - return nil - } - - if bytes.HasPrefix(p.line, shallow) { - return decodeShallow - } - - if len(p.line) == 0 { - return nil - } - - saveTo := p.data.References - if bytes.HasSuffix(p.line, peeled) { - p.line = bytes.TrimSuffix(p.line, peeled) - saveTo = p.data.Peeled - } - - ref, hash, err := readRef(p.line) - if err != nil { - p.error("%s", err) - return nil - } - saveTo[ref] = hash - - return decodeOtherRefs -} - -// Reads a ref-name -func readRef(data []byte) (string, plumbing.Hash, error) { - chunks := bytes.Split(data, sp) - switch { - case len(chunks) == 1: - return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found") - case len(chunks) > 2: - return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found") - default: - return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil - } -} - -// Keeps reading shallows until a flush-pkt is found -func decodeShallow(p *advRefsDecoder) decoderStateFn { - if !bytes.HasPrefix(p.line, shallow) { - p.error("malformed shallow prefix, found %q... 
instead", p.line[:len(shallow)]) - return nil - } - p.line = bytes.TrimPrefix(p.line, shallow) - - if len(p.line) != hashSize { - p.error(fmt.Sprintf( - "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes", - len(p.line))) - return nil - } - - text := p.line[:hashSize] - var h plumbing.Hash - if _, err := hex.Decode(h[:], text); err != nil { - p.error("invalid hash text: %s", err) - return nil - } - - p.data.Shallows = append(p.data.Shallows, h) - - if ok := p.nextLine(); !ok { - return nil - } - - if len(p.line) == 0 { - return nil // successful parse of the advertised-refs message - } - - return decodeShallow -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go deleted file mode 100644 index fb9bd883fce..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go +++ /dev/null @@ -1,176 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// Encode writes the AdvRefs encoding to a writer. -// -// All the payloads will end with a newline character. Capabilities, -// references and shallows are written in alphabetical order, except for -// peeled references that always follow their corresponding references. -func (a *AdvRefs) Encode(w io.Writer) error { - e := newAdvRefsEncoder(w) - return e.Encode(a) -} - -type advRefsEncoder struct { - data *AdvRefs // data to encode - pe *pktline.Encoder // where to write the encoded data - firstRefName string // reference name to encode in the first pkt-line (HEAD if present) - firstRefHash plumbing.Hash // hash referenced to encode in the first pkt-line (HEAD if present) - sortedRefs []string // hash references to encode ordered by increasing order - err error // sticky error - -} - -func newAdvRefsEncoder(w io.Writer) *advRefsEncoder { - return &advRefsEncoder{ - pe: pktline.NewEncoder(w), - } -} - -func (e *advRefsEncoder) Encode(v *AdvRefs) error { - e.data = v - e.sortRefs() - e.setFirstRef() - - for state := encodePrefix; state != nil; { - state = state(e) - } - - return e.err -} - -func (e *advRefsEncoder) sortRefs() { - if len(e.data.References) > 0 { - refs := make([]string, 0, len(e.data.References)) - for refName := range e.data.References { - refs = append(refs, refName) - } - - sort.Strings(refs) - e.sortedRefs = refs - } -} - -func (e *advRefsEncoder) setFirstRef() { - if e.data.Head != nil { - e.firstRefName = head - e.firstRefHash = *e.data.Head - return - } - - if len(e.sortedRefs) > 0 { - refName := e.sortedRefs[0] - e.firstRefName = refName - e.firstRefHash = e.data.References[refName] - } -} - -type encoderStateFn func(*advRefsEncoder) encoderStateFn - -func encodePrefix(e *advRefsEncoder) encoderStateFn { - for _, p := range e.data.Prefix { - if bytes.Equal(p, pktline.Flush) { - if e.err = e.pe.Flush(); e.err != nil { - return nil - } - continue - } - if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil { - return nil - } - } - - return encodeFirstLine -} - -// Adds the first pkt-line payload: head hash, head ref and capabilities. -// If HEAD ref is not found, the first reference ordered in increasing order will be used. -// If there aren't HEAD neither refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)". 
-// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt -// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt -func encodeFirstLine(e *advRefsEncoder) encoderStateFn { - const formatFirstLine = "%s %s\x00%s\n" - var firstLine string - capabilities := formatCaps(e.data.Capabilities) - - if e.firstRefName == "" { - firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) - } else { - firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) - - } - - if e.err = e.pe.EncodeString(firstLine); e.err != nil { - return nil - } - - return encodeRefs -} - -func formatCaps(c *capability.List) string { - if c == nil { - return "" - } - - return c.String() -} - -// Adds the (sorted) refs: hash SP refname EOL -// and their peeled refs if any. -func encodeRefs(e *advRefsEncoder) encoderStateFn { - for _, r := range e.sortedRefs { - if r == e.firstRefName { - continue - } - - hash := e.data.References[r] - if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { - return nil - } - - if hash, ok := e.data.Peeled[r]; ok { - if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { - return nil - } - } - } - - return encodeShallow -} - -// Adds the (sorted) shallows: "shallow" SP hash EOL -func encodeShallow(e *advRefsEncoder) encoderStateFn { - sorted := sortShallows(e.data.Shallows) - for _, hash := range sorted { - if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { - return nil - } - } - - return encodeFlush -} - -func sortShallows(c []plumbing.Hash) []string { - ret := []string{} - for _, h := range c { - ret = append(ret, h.String()) - } - sort.Strings(ret) - - return ret -} - -func encodeFlush(e *advRefsEncoder) encoderStateFn { - e.err = e.pe.Flush() - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go deleted file mode 100644 index 8d6a56f532a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package capability defines the server and client capabilities. -package capability - -// Capability describes a server or client capability. -type Capability string - -func (n Capability) String() string { - return string(n) -} - -const ( - // MultiACK capability allows the server to return "ACK obj-id continue" as - // soon as it finds a commit that it can use as a common base, between the - // client's wants and the client's have set. - // - // By sending this early, the server can potentially head off the client - // from walking any further down that particular branch of the client's - // repository history. The client may still need to walk down other - // branches, sending have lines for those, until the server has a - // complete cut across the DAG, or the client has said "done". - // - // Without multi_ack, a client sends have lines in --date-order until - // the server has found a common base. That means the client will send - // have lines that are already known by the server to be common, because - // they overlap in time with another branch that the server hasn't found - // a common base on yet. 
- // - // For example suppose the client has commits in caps that the server - // doesn't and the server has commits in lower case that the client - // doesn't, as in the following diagram: - // - // +---- u ---------------------- x - // / +----- y - // / / - // a -- b -- c -- d -- E -- F - // \ - // +--- Q -- R -- S - // - // If the client wants x,y and starts out by saying have F,S, the server - // doesn't know what F,S is. Eventually the client says "have d" and - // the server sends "ACK d continue" to let the client know to stop - // walking down that line (so don't send c-b-a), but it's not done yet, - // it needs a base for x. The client keeps going with S-R-Q, until a - // gets reached, at which point the server has a clear base and it all - // ends. - // - // Without multi_ack the client would have sent that c-b-a chain anyway, - // interleaved with S-R-Q. - MultiACK Capability = "multi_ack" - // MultiACKDetailed is an extension of multi_ack that permits client to - // better understand the server's in-memory state. - MultiACKDetailed Capability = "multi_ack_detailed" - // NoDone should only be used with the smart HTTP protocol. If - // multi_ack_detailed and no-done are both present, then the sender is - // free to immediately send a pack following its first "ACK obj-id ready" - // message. - // - // Without no-done in the smart HTTP protocol, the server session would - // end and the client has to make another trip to send "done" before - // the server can send the pack. no-done removes the last round and - // thus slightly reduces latency. - NoDone Capability = "no-done" - // ThinPack is one with deltas which reference base objects not - // contained within the pack (but are known to exist at the receiving - // end). This can reduce the network traffic significantly, but it - // requires the receiving end to know how to "thicken" these packs by - // adding the missing bases to the pack. - // - // The upload-pack server advertises 'thin-pack' when it can generate - // and send a thin pack. A client requests the 'thin-pack' capability - // when it understands how to "thicken" it, notifying the server that - // it can receive such a pack. A client MUST NOT request the - // 'thin-pack' capability if it cannot turn a thin pack into a - // self-contained pack. - // - // Receive-pack, on the other hand, is assumed by default to be able to - // handle thin packs, but can ask the client not to use the feature by - // advertising the 'no-thin' capability. A client MUST NOT send a thin - // pack if the server advertises the 'no-thin' capability. - // - // The reasons for this asymmetry are historical. The receive-pack - // program did not exist until after the invention of thin packs, so - // historically the reference implementation of receive-pack always - // understood thin packs. Adding 'no-thin' later allowed receive-pack - // to disable the feature in a backwards-compatible manner. - ThinPack Capability = "thin-pack" - // Sideband means that server can send, and client understand multiplexed - // progress reports and error info interleaved with the packfile itself. - // - // These two options are mutually exclusive. A modern client always - // favors Sideband64k. - // - // Either mode indicates that the packfile data will be streamed broken - // up into packets of up to either 1000 bytes in the case of 'side_band', - // or 65520 bytes in the case of 'side_band_64k'. 
Each packet is made up
-	// of a leading 4-byte pkt-line length of how much data is in the packet,
-	// followed by a 1-byte stream code, followed by the actual data.
-	//
-	// The stream code can be one of:
-	//
-	//   1 - pack data
-	//   2 - progress messages
-	//   3 - fatal error message just before stream aborts
-	//
-	// The "side-band-64k" capability came about as a way for newer clients
-	// that can handle much larger packets to request packets that are
-	// actually crammed nearly full, while maintaining backward compatibility
-	// for the older clients.
-	//
-	// Further, with side-band and its up to 1000-byte messages, it's actually
-	// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
-	// same deal, you have up to 65519 bytes of data and 1 byte for the stream
-	// code.
-	//
-	// The client MUST send at most one of "side-band" and "side-band-64k".
-	// The server MUST diagnose it as an error if the client requests both.
-	Sideband    Capability = "side-band"
-	Sideband64k Capability = "side-band-64k"
-	// OFSDelta server can send, and client understand PACKv2 with delta
-	// referring to its base by position in pack rather than by an obj-id. That
-	// is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile.
-	OFSDelta Capability = "ofs-delta"
-	// Agent the server may optionally send this capability to notify the client
-	// that the server is running version `X`. The client may optionally return
-	// its own agent string by responding with an `agent=Y` capability (but it
-	// MUST NOT do so if the server did not mention the agent capability). The
-	// `X` and `Y` strings may contain any printable ASCII characters except
-	// space (i.e., the byte range 32 < x < 127), and are typically of the form
-	// "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely
-	// informative for statistics and debugging purposes, and MUST NOT be used
-	// to programmatically assume the presence or absence of particular features.
-	Agent Capability = "agent"
-	// Shallow capability adds "deepen", "shallow" and "unshallow" commands to
-	// the fetch-pack/upload-pack protocol so clients can request shallow
-	// clones.
-	Shallow Capability = "shallow"
-	// DeepenSince adds "deepen-since" command to fetch-pack/upload-pack
-	// protocol so the client can request shallow clones that are cut at a
-	// specific time, instead of depth. Internally it's equivalent to doing
-	// "rev-list --max-age=<timestamp>" on the server side. "deepen-since"
-	// cannot be used with "deepen".
-	DeepenSince Capability = "deepen-since"
-	// DeepenNot adds "deepen-not" command to fetch-pack/upload-pack
-	// protocol so the client can request shallow clones that are cut at a
-	// specific revision, instead of depth. Internally it's equivalent to
-	// doing "rev-list --not <rev>" on the server side. "deepen-not"
-	// cannot be used with "deepen", but can be used with "deepen-since".
-	DeepenNot Capability = "deepen-not"
-	// DeepenRelative if this capability is requested by the client, the
-	// semantics of "deepen" command is changed. The "depth" argument is the
-	// depth from the current shallow boundary, instead of the depth from
-	// remote refs.
-	DeepenRelative Capability = "deepen-relative"
-	// NoProgress the client was started with "git clone -q" or something, and
-	// doesn't want that side band 2. Basically the client just says "I do not
-	// wish to receive stream 2 on sideband, so do not send it to me, and if
-	// you did, I will drop it on the floor anyway".
However, the sideband - // channel 3 is still used for error responses. - NoProgress Capability = "no-progress" - // IncludeTag capability is about sending annotated tags if we are - // sending objects they point to. If we pack an object to the client, and - // a tag object points exactly at that object, we pack the tag object too. - // In general this allows a client to get all new annotated tags when it - // fetches a branch, in a single network connection. - // - // Clients MAY always send include-tag, hardcoding it into a request when - // the server advertises this capability. The decision for a client to - // request include-tag only has to do with the client's desires for tag - // data, whether or not a server had advertised objects in the - // refs/tags/* namespace. - // - // Servers MUST pack the tags if their referrant is packed and the client - // has requested include-tags. - // - // Clients MUST be prepared for the case where a server has ignored - // include-tag and has not actually sent tags in the pack. In such - // cases the client SHOULD issue a subsequent fetch to acquire the tags - // that include-tag would have otherwise given the client. - // - // The server SHOULD send include-tag, if it supports it, regardless - // of whether or not there are tags available. - IncludeTag Capability = "include-tag" - // ReportStatus the receive-pack process can receive a 'report-status' - // capability, which tells it that the client wants a report of what - // happened after a packfile upload and reference update. If the pushing - // client requests this capability, after unpacking and updating references - // the server will respond with whether the packfile unpacked successfully - // and if each reference was updated successfully. If any of those were not - // successful, it will send back an error message. See pack-protocol.txt - // for example messages. - ReportStatus Capability = "report-status" - // DeleteRefs If the server sends back this capability, it means that - // it is capable of accepting a zero-id value as the target - // value of a reference update. It is not sent back by the client, it - // simply informs the client that it can be sent zero-id values - // to delete references - DeleteRefs Capability = "delete-refs" - // Quiet If the receive-pack server advertises this capability, it is - // capable of silencing human-readable progress output which otherwise may - // be shown when processing the received pack. A send-pack client should - // respond with the 'quiet' capability to suppress server-side progress - // reporting if the local progress reporting is also being suppressed - // (e.g., via `push -q`, or if stderr does not go to a tty). - Quiet Capability = "quiet" - // Atomic If the server sends this capability it is capable of accepting - // atomic pushes. If the pushing client requests this capability, the server - // will update the refs in one atomic transaction. Either all refs are - // updated or none. - Atomic Capability = "atomic" - // PushOptions If the server sends this capability it is able to accept - // push options after the update commands have been sent, but before the - // packfile is streamed. If the pushing client requests this capability, - // the server will pass the options to the pre- and post- receive hooks - // that process this push request. 
-	PushOptions Capability = "push-options"
-	// AllowTipSHA1InWant if the upload-pack server advertises this capability,
-	// fetch-pack may send "want" lines with SHA-1s that exist at the server but
-	// are not advertised by upload-pack.
-	AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
-	// AllowReachableSHA1InWant if the upload-pack server advertises this
-	// capability, fetch-pack may send "want" lines with SHA-1s that exist at
-	// the server but are not advertised by upload-pack.
-	AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
-	// PushCert the receive-pack server that advertises this capability is
-	// willing to accept a signed push certificate, and asks the <nonce> to be
-	// included in the push certificate. A send-pack client MUST NOT
-	// send a push-cert packet unless the receive-pack server advertises
-	// this capability.
-	PushCert Capability = "push-cert"
-	// SymRef symbolic reference support for better negotiation.
-	SymRef Capability = "symref"
-	// ObjectFormat takes a hash algorithm as an argument, indicates that the
-	// server supports the given hash algorithms.
-	ObjectFormat Capability = "object-format"
-	// Filter if present, fetch-pack may send "filter" commands to request a
-	// partial clone or partial fetch and request that the server omit various
-	// objects from the packfile.
-	Filter Capability = "filter"
-)
-
-const DefaultAgent = "go-git/4.x"
-
-var known = map[Capability]bool{
-	MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
-	Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
-	Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
-	NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
-	Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
-	AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
-	ObjectFormat: true, Filter: true,
-}
-
-var requiresArgument = map[Capability]bool{
-	Agent: true, PushCert: true, SymRef: true, ObjectFormat: true,
-}
-
-var multipleArgument = map[Capability]bool{
-	SymRef: true,
-}
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
deleted file mode 100644
index f41ec799cf3..00000000000
--- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package capability
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"strings"
-)
-
-var (
-	// ErrArgumentsRequired is returned if no arguments are given with a
-	// capability that requires arguments
-	ErrArgumentsRequired = errors.New("arguments required")
-	// ErrArguments is returned if arguments are given with a capability that
-	// does not support arguments
-	ErrArguments = errors.New("arguments not allowed")
-	// ErrEmptyArgument is returned when an empty value is given
-	ErrEmptyArgument = errors.New("empty argument")
-	// ErrMultipleArguments is returned when multiple arguments are given to a
-	// capability that does not support them
-	ErrMultipleArguments = errors.New("multiple arguments not allowed")
-)
-
-// List represents a list of capabilities
-type List struct {
-	m    map[Capability]*entry
-	sort []string
-}
-
-type entry struct {
-	Name   Capability
-	Values []string
-}
-
-// NewList returns a new List of capabilities
-func NewList() *List {
-	return &List{
-		m: make(map[Capability]*entry),
-	}
-}
-
-// IsEmpty returns true if the List is empty
-func (l *List) IsEmpty() bool {
return len(l.sort) == 0 -} - -// Decode decodes list of capabilities from raw into the list -func (l *List) Decode(raw []byte) error { - // git 1.x receive pack used to send a leading space on its - // git-receive-pack capabilities announcement. We just trim space to be - // tolerant to space changes in different versions. - raw = bytes.TrimSpace(raw) - - if len(raw) == 0 { - return nil - } - - for _, data := range bytes.Split(raw, []byte{' '}) { - pair := bytes.SplitN(data, []byte{'='}, 2) - - c := Capability(pair[0]) - if len(pair) == 1 { - if err := l.Add(c); err != nil { - return err - } - - continue - } - - if err := l.Add(c, string(pair[1])); err != nil { - return err - } - } - - return nil -} - -// Get returns the values for a capability -func (l *List) Get(capability Capability) []string { - if _, ok := l.m[capability]; !ok { - return nil - } - - return l.m[capability].Values -} - -// Set sets a capability removing the previous values -func (l *List) Set(capability Capability, values ...string) error { - delete(l.m, capability) - return l.Add(capability, values...) -} - -// Add adds a capability, values are optional -func (l *List) Add(c Capability, values ...string) error { - if err := l.validate(c, values); err != nil { - return err - } - - if !l.Supports(c) { - l.m[c] = &entry{Name: c} - l.sort = append(l.sort, c.String()) - } - - if len(values) == 0 { - return nil - } - - if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { - return ErrMultipleArguments - } - - l.m[c].Values = append(l.m[c].Values, values...) - return nil -} - -func (l *List) validateNoEmptyArgs(values []string) error { - for _, v := range values { - if v == "" { - return ErrEmptyArgument - } - } - return nil -} - -func (l *List) validate(c Capability, values []string) error { - if !known[c] { - return l.validateNoEmptyArgs(values) - } - if requiresArgument[c] && len(values) == 0 { - return ErrArgumentsRequired - } - - if !requiresArgument[c] && len(values) != 0 { - return ErrArguments - } - - if !multipleArgument[c] && len(values) > 1 { - return ErrMultipleArguments - } - return l.validateNoEmptyArgs(values) -} - -// Supports returns true if capability is present -func (l *List) Supports(capability Capability) bool { - _, ok := l.m[capability] - return ok -} - -// Delete deletes a capability from the List -func (l *List) Delete(capability Capability) { - if !l.Supports(capability) { - return - } - - delete(l.m, capability) - for i, c := range l.sort { - if c != string(capability) { - continue - } - - l.sort = append(l.sort[:i], l.sort[i+1:]...) - return - } -} - -// All returns a slice with all defined capabilities. 
-func (l *List) All() []Capability { - var cs []Capability - for _, key := range l.sort { - cs = append(cs, Capability(key)) - } - - return cs -} - -// String generates the capabilities strings, the capabilities are sorted in -// insertion order -func (l *List) String() string { - var o []string - for _, key := range l.sort { - cap := l.m[Capability(key)] - if len(cap.Values) == 0 { - o = append(o, key) - continue - } - - for _, value := range cap.Values { - o = append(o, fmt.Sprintf("%s=%s", key, value)) - } - } - - return strings.Join(o, " ") -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go deleted file mode 100644 index ab07ac8f74f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go +++ /dev/null @@ -1,70 +0,0 @@ -package packp - -import ( - "fmt" -) - -type stateFn func() stateFn - -const ( - // common - hashSize = 40 - - // advrefs - head = "HEAD" - noHead = "capabilities^{}" -) - -var ( - // common - sp = []byte(" ") - eol = []byte("\n") - eq = []byte{'='} - - // advertised-refs - null = []byte("\x00") - peeled = []byte("^{}") - noHeadMark = []byte(" capabilities^{}\x00") - - // upload-request - want = []byte("want ") - shallow = []byte("shallow ") - deepen = []byte("deepen") - deepenCommits = []byte("deepen ") - deepenSince = []byte("deepen-since ") - deepenReference = []byte("deepen-not ") - - // shallow-update - unshallow = []byte("unshallow ") - - // server-response - ack = []byte("ACK") - nak = []byte("NAK") - - // updreq - shallowNoSp = []byte("shallow") -) - -func isFlush(payload []byte) bool { - return len(payload) == 0 -} - -// ErrUnexpectedData represents an unexpected data decoding a message -type ErrUnexpectedData struct { - Msg string - Data []byte -} - -// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and -// the message given -func NewErrUnexpectedData(msg string, data []byte) error { - return &ErrUnexpectedData{Msg: msg, Data: data} -} - -func (err *ErrUnexpectedData) Error() string { - if len(err.Data) == 0 { - return err.Msg - } - - return fmt.Sprintf("%s (%s)", err.Msg, err.Data) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go deleted file mode 100644 index 4950d1d6625..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go +++ /dev/null @@ -1,724 +0,0 @@ -package packp - -/* - -A nice way to trace the real data transmitted and received by git, use: - -GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git -GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git - -Here follows a copy of the current protocol specification at the time of -this writing. - -(Please notice that most http git servers will add a flush-pkt after the -first pkt-line when using HTTP smart.) 
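For orientation before the protocol text that follows, the capability.List type removed above is self-contained enough to exercise on its own. A minimal sketch, assuming the vendored import path shown in the deleted file headers still resolves:

----
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	l := capability.NewList()
	// Decode a raw capability announcement, as sent after the first ref.
	if err := l.Decode([]byte("multi_ack side-band-64k agent=git/2.41.0")); err != nil {
		panic(err)
	}
	fmt.Println(l.Supports(capability.Sideband64k)) // true
	fmt.Println(l.Get(capability.Agent))            // [git/2.41.0]
	fmt.Println(l.String())                         // re-encoded in insertion order
}
----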
- - -Documentation Common to Pack and Http Protocols -=============================================== - -ABNF Notation -------------- - -ABNF notation as described by RFC 5234 is used within the protocol documents, -except the following replacement core rules are used: ----- - HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" ----- - -We also define the following common rules: ----- - NUL = %x00 - zero-id = 40*"0" - obj-id = 40*(HEXDIGIT) - - refname = "HEAD" - refname /= "refs/" ----- - -A refname is a hierarchical octet string beginning with "refs/" and -not violating the 'git-check-ref-format' command's validation rules. -More specifically, they: - -. They can include slash `/` for hierarchical (directory) - grouping, but no slash-separated component can begin with a - dot `.`. - -. They must contain at least one `/`. This enforces the presence of a - category like `heads/`, `tags/` etc. but the actual names are not - restricted. - -. They cannot have two consecutive dots `..` anywhere. - -. They cannot have ASCII control characters (i.e. bytes whose - values are lower than \040, or \177 `DEL`), space, tilde `~`, - caret `^`, colon `:`, question-mark `?`, asterisk `*`, - or open bracket `[` anywhere. - -. They cannot end with a slash `/` or a dot `.`. - -. They cannot end with the sequence `.lock`. - -. They cannot contain a sequence `@{`. - -. They cannot contain a `\\`. - - -pkt-line Format ---------------- - -Much (but not all) of the payload is described around pkt-lines. - -A pkt-line is a variable length binary string. The first four bytes -of the line, the pkt-len, indicates the total length of the line, -in hexadecimal. The pkt-len includes the 4 bytes used to contain -the length's hexadecimal representation. - -A pkt-line MAY contain binary data, so implementors MUST ensure -pkt-line parsing/formatting routines are 8-bit clean. - -A non-binary line SHOULD BE terminated by an LF, which if present -MUST be included in the total length. Receivers MUST treat pkt-lines -with non-binary data the same whether or not they contain the trailing -LF (stripping the LF if present, and not complaining when it is -missing). - -The maximum length of a pkt-line's data component is 65516 bytes. -Implementations MUST NOT send pkt-line whose length exceeds 65520 -(65516 bytes of payload + 4 bytes of length data). - -Implementations SHOULD NOT send an empty pkt-line ("0004"). - -A pkt-line with a length field of 0 ("0000"), called a flush-pkt, -is a special case and MUST be handled differently than an empty -pkt-line ("0004"). - ----- - pkt-line = data-pkt / flush-pkt - - data-pkt = pkt-len pkt-payload - pkt-len = 4*(HEXDIG) - pkt-payload = (pkt-len - 4)*(OCTET) - - flush-pkt = "0000" ----- - -Examples (as C-style strings): - ----- - pkt-line actual value - --------------------------------- - "0006a\n" "a\n" - "0005a" "a" - "000bfoobar\n" "foobar\n" - "0004" "" ----- - -Packfile transfer protocols -=========================== - -Git supports transferring data in packfiles over the ssh://, git://, http:// and -file:// transports. There exist two sets of protocols, one for pushing -data from a client to a server and another for fetching data from a -server to a client. The three transports (ssh, git, file) use the same -protocol to transfer data. http is documented in http-protocol.txt. 
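The length rule above is purely mechanical. A short sketch reproducing the example table (pktLine is a hypothetical helper, not part of the deleted sources):

----
package main

import "fmt"

// pktLine frames a payload as a data-pkt: four hex digits giving the total
// length (payload plus the 4 length bytes themselves), then the payload.
func pktLine(payload string) string {
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func main() {
	fmt.Printf("%q\n", pktLine("a\n"))      // "0006a\n"
	fmt.Printf("%q\n", pktLine("foobar\n")) // "000bfoobar\n"
	fmt.Printf("%q\n", "0000")              // flush-pkt: zero length, no payload
}
----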
- -The processes invoked in the canonical Git implementation are 'upload-pack' -on the server side and 'fetch-pack' on the client side for fetching data; -then 'receive-pack' on the server and 'send-pack' on the client for pushing -data. The protocol functions to have a server tell a client what is -currently on the server, then for the two to negotiate the smallest amount -of data to send in order to fully update one or the other. - -pkt-line Format ---------------- - -The descriptions below build on the pkt-line format described in -protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless -otherwise noted the usual pkt-line LF rules apply: the sender SHOULD -include a LF, but the receiver MUST NOT complain if it is not present. - -Transports ----------- -There are three transports over which the packfile protocol is -initiated. The Git transport is a simple, unauthenticated server that -takes the command (almost always 'upload-pack', though Git -servers can be configured to be globally writable, in which 'receive- -pack' initiation is also allowed) with which the client wishes to -communicate and executes it and connects it to the requesting -process. - -In the SSH transport, the client just runs the 'upload-pack' -or 'receive-pack' process on the server over the SSH protocol and then -communicates with that invoked process over the SSH connection. - -The file:// transport runs the 'upload-pack' or 'receive-pack' -process locally and communicates with it over a pipe. - -Git Transport -------------- - -The Git transport starts off by sending the command and repository -on the wire using the pkt-line format, followed by a NUL byte and a -hostname parameter, terminated by a NUL byte. - - 0032git-upload-pack /project.git\0host=myserver.com\0 - --- - git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] - request-command = "git-upload-pack" / "git-receive-pack" / - "git-upload-archive" ; case sensitive - pathname = *( %x01-ff ) ; exclude NUL - host-parameter = "host=" hostname [ ":" port ] --- - -Only host-parameter is allowed in the git-proto-request. Clients -MUST NOT attempt to send additional parameters. It is used for the -git-daemon name based virtual hosting. See --interpolated-path -option to git daemon, with the %H/%CH format characters. - -Basically what the Git client is doing to connect to an 'upload-pack' -process on the server side over the Git protocol is this: - - $ echo -e -n \ - "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | - nc -v example.com 9418 - -If the server refuses the request for some reasons, it could abort -gracefully with an error message. - ----- - error-line = PKT-LINE("ERR" SP explanation-text) ----- - - -SSH Transport -------------- - -Initiating the upload-pack or receive-pack processes over SSH is -executing the binary on the server via SSH remote execution. -It is basically equivalent to running this: - - $ ssh git.example.com "git-upload-pack '/project.git'" - -For a server to support Git pushing and pulling for a given user over -SSH, that user needs to be able to execute one or both of those -commands via the SSH shell that they are provided on login. On some -systems, that shell access is limited to only being able to run those -two commands, or even just one of them. 
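The same framing yields a git-proto-request; a sketch with a hypothetical gitProtoRequest helper:

----
package main

import "fmt"

// gitProtoRequest builds the first pkt-line sent over the Git transport:
// request-command SP pathname NUL "host=" hostname NUL, length-prefixed.
func gitProtoRequest(command, path, host string) string {
	payload := fmt.Sprintf("%s %s\x00host=%s\x00", command, path, host)
	return fmt.Sprintf("%04x%s", len(payload)+4, payload)
}

func main() {
	// Produces a request of the same shape as the nc example above.
	fmt.Printf("%q\n", gitProtoRequest("git-upload-pack", "/project.git", "myserver.com"))
}
----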
- -In an ssh:// format URI, it's absolute in the URI, so the '/' after -the host name (or port number) is sent as an argument, which is then -read by the remote git-upload-pack exactly as is, so it's effectively -an absolute path in the remote filesystem. - - git clone ssh://user@example.com/project.git - | - v - ssh user@example.com "git-upload-pack '/project.git'" - -In a "user@host:path" format URI, its relative to the user's home -directory, because the Git client will run: - - git clone user@example.com:project.git - | - v - ssh user@example.com "git-upload-pack 'project.git'" - -The exception is if a '~' is used, in which case -we execute it without the leading '/'. - - ssh://user@example.com/~alice/project.git, - | - v - ssh user@example.com "git-upload-pack '~alice/project.git'" - -A few things to remember here: - -- The "command name" is spelled with dash (e.g. git-upload-pack), but - this can be overridden by the client; - -- The repository path is always quoted with single quotes. - -Fetching Data From a Server ---------------------------- - -When one Git repository wants to get data that a second repository -has, the first can 'fetch' from the second. This operation determines -what data the server has that the client does not then streams that -data down to the client in packfile format. - - -Reference Discovery -------------------- - -When the client initially connects the server will immediately respond -with a listing of each reference it has (all branches and tags) along -with the object name that each reference currently points to. - - $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | - nc -v example.com 9418 - 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack - side-band side-band-64k ofs-delta shallow no-progress include-tag - 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration - 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master - 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 - 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 - 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} - 0000 - -The returned response is a pkt-line stream describing each ref and -its current value. The stream MUST be sorted by name according to -the C locale ordering. - -If HEAD is a valid ref, HEAD MUST appear as the first advertised -ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the -advertisement list at all, but other refs may still appear. - -The stream MUST include capability declarations behind a NUL on the -first ref. The peeled value of a ref (that is "ref^{}") MUST be -immediately after the ref itself, if presented. A conforming server -MUST peel the ref if it's an annotated tag. - ----- - advertised-refs = (no-refs / list-of-refs) - *shallow - flush-pkt - - no-refs = PKT-LINE(zero-id SP "capabilities^{}" - NUL capability-list) - - list-of-refs = first-ref *other-ref - first-ref = PKT-LINE(obj-id SP refname - NUL capability-list) - - other-ref = PKT-LINE(other-tip / other-peeled) - other-tip = obj-id SP refname - other-peeled = obj-id SP refname "^{}" - - shallow = PKT-LINE("shallow" SP obj-id) - - capability-list = capability *(SP capability) - capability = 1*(LC_ALPHA / DIGIT / "-" / "_") - LC_ALPHA = %x61-7A ----- - -Server and client MUST use lowercase for obj-id, both MUST treat obj-id -as case-insensitive. - -See protocol-capabilities.txt for a list of allowed server capabilities -and descriptions. 
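A sketch of the first-ref parse implied by this grammar, assuming 40-byte SHA-1 obj-ids as elsewhere in this text (parseFirstRef is a hypothetical name):

----
package main

import (
	"bytes"
	"fmt"
)

// parseFirstRef splits a first-ref payload: obj-id SP refname NUL capability-list.
func parseFirstRef(line []byte) (objID, refname, caps string, err error) {
	refPart, capList, found := bytes.Cut(line, []byte{0})
	if !found {
		return "", "", "", fmt.Errorf("missing NUL before capability-list")
	}
	fields := bytes.SplitN(refPart, []byte(" "), 2)
	if len(fields) != 2 || len(fields[0]) != 40 {
		return "", "", "", fmt.Errorf("malformed first-ref: %q", line)
	}
	return string(fields[0]), string(fields[1]), string(capList), nil
}

func main() {
	fmt.Println(parseFirstRef([]byte(
		"7217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\x00multi_ack thin-pack side-band")))
}
----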
- -Packfile Negotiation --------------------- -After reference and capabilities discovery, the client can decide to -terminate the connection by sending a flush-pkt, telling the server it can -now gracefully terminate, and disconnect, when it does not need any pack -data. This can happen with the ls-remote command, and also can happen when -the client already is up-to-date. - -Otherwise, it enters the negotiation phase, where the client and -server determine what the minimal packfile necessary for transport is, -by telling the server what objects it wants, its shallow objects -(if any), and the maximum commit depth it wants (if any). The client -will also send a list of the capabilities it wants to be in effect, -out of what the server said it could do with the first 'want' line. - ----- - upload-request = want-list - *shallow-line - *1depth-request - flush-pkt - - want-list = first-want - *additional-want - - shallow-line = PKT-LINE("shallow" SP obj-id) - - depth-request = PKT-LINE("deepen" SP depth) / - PKT-LINE("deepen-since" SP timestamp) / - PKT-LINE("deepen-not" SP ref) - - first-want = PKT-LINE("want" SP obj-id SP capability-list) - additional-want = PKT-LINE("want" SP obj-id) - - depth = 1*DIGIT ----- - -Clients MUST send all the obj-ids it wants from the reference -discovery phase as 'want' lines. Clients MUST send at least one -'want' command in the request body. Clients MUST NOT mention an -obj-id in a 'want' command which did not appear in the response -obtained through ref discovery. - -The client MUST write all obj-ids which it only has shallow copies -of (meaning that it does not have the parents of a commit) as -'shallow' lines so that the server is aware of the limitations of -the client's history. - -The client now sends the maximum commit history depth it wants for -this transaction, which is the number of commits it wants from the -tip of the history, if any, as a 'deepen' line. A depth of 0 is the -same as not making a depth request. The client does not want to receive -any commits beyond this depth, nor does it want objects needed only to -complete those commits. Commits whose parents are not received as a -result are defined as shallow and marked as such in the server. This -information is sent back to the client in the next step. - -Once all the 'want's and 'shallow's (and optional 'deepen') are -transferred, clients MUST send a flush-pkt, to tell the server side -that it is done sending the list. - -Otherwise, if the client sent a positive depth request, the server -will determine which commits will and will not be shallow and -send this information to the client. If the client did not request -a positive depth, this step is skipped. - ----- - shallow-update = *shallow-line - *unshallow-line - flush-pkt - - shallow-line = PKT-LINE("shallow" SP obj-id) - - unshallow-line = PKT-LINE("unshallow" SP obj-id) ----- - -If the client has requested a positive depth, the server will compute -the set of commits which are no deeper than the desired depth. The set -of commits start at the client's wants. - -The server writes 'shallow' lines for each -commit whose parents will not be sent as a result. The server writes -an 'unshallow' line for each commit which the client has indicated is -shallow, but is no longer shallow at the currently requested depth -(that is, its parents will now be sent). The server MUST NOT mark -as unshallow anything which the client has not indicated was shallow. 
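The upload-request grammar above translates almost one-to-one into code. A sketch with hypothetical helper names:

----
package main

import (
	"fmt"
	"io"
	"os"
)

// buildUploadRequest emits the want-list, optional shallows and deepen,
// then the closing flush-pkt, per the grammar above.
func buildUploadRequest(w io.Writer, wants, shallows []string, depth int, caps string) {
	pkt := func(s string) { fmt.Fprintf(w, "%04x%s", len(s)+4, s) }
	for i, id := range wants {
		if i == 0 {
			pkt(fmt.Sprintf("want %s %s\n", id, caps)) // only first-want carries capabilities
			continue
		}
		pkt(fmt.Sprintf("want %s\n", id))
	}
	for _, id := range shallows {
		pkt(fmt.Sprintf("shallow %s\n", id))
	}
	if depth > 0 {
		pkt(fmt.Sprintf("deepen %d\n", depth))
	}
	io.WriteString(w, "0000") // flush-pkt: done sending the list
}

func main() {
	buildUploadRequest(os.Stdout,
		[]string{"74730d410fcb6603ace96f1dc55ea6196122532d"}, nil, 0,
		"multi_ack side-band-64k ofs-delta")
}
----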
- -Now the client will send a list of the obj-ids it has using 'have' -lines, so the server can make a packfile that only contains the objects -that the client needs. In multi_ack mode, the canonical implementation -will send up to 32 of these at a time, then will send a flush-pkt. The -canonical implementation will skip ahead and send the next 32 immediately, -so that there is always a block of 32 "in-flight on the wire" at a time. - ----- - upload-haves = have-list - compute-end - - have-list = *have-line - have-line = PKT-LINE("have" SP obj-id) - compute-end = flush-pkt / PKT-LINE("done") ----- - -If the server reads 'have' lines, it then will respond by ACKing any -of the obj-ids the client said it had that the server also has. The -server will ACK obj-ids differently depending on which ack mode is -chosen by the client. - -In multi_ack mode: - - * the server will respond with 'ACK obj-id continue' for any common - commits. - - * once the server has found an acceptable common base commit and is - ready to make a packfile, it will blindly ACK all 'have' obj-ids - back to the client. - - * the server will then send a 'NAK' and then wait for another response - from the client - either a 'done' or another list of 'have' lines. - -In multi_ack_detailed mode: - - * the server will differentiate the ACKs where it is signaling - that it is ready to send data with 'ACK obj-id ready' lines, and - signals the identified common commits with 'ACK obj-id common' lines. - -Without either multi_ack or multi_ack_detailed: - - * upload-pack sends "ACK obj-id" on the first common object it finds. - After that it says nothing until the client gives it a "done". - - * upload-pack sends "NAK" on a flush-pkt if no common object - has been found yet. If one has been found, and thus an ACK - was already sent, it's silent on the flush-pkt. - -After the client has gotten enough ACK responses that it can determine -that the server has enough information to send an efficient packfile -(in the canonical implementation, this is determined when it has received -enough ACKs that it can color everything left in the --date-order queue -as common with the server, or the --date-order queue is empty), or the -client determines that it wants to give up (in the canonical implementation, -this is determined when the client sends 256 'have' lines without getting -any of them ACKed by the server - meaning there is nothing in common and -the server should just send all of its objects), then the client will send -a 'done' command. The 'done' command signals to the server that the client -is ready to receive its packfile data. - -However, the 256 limit *only* turns on in the canonical client -implementation if we have received at least one "ACK %s continue" -during a prior round. This helps to ensure that at least one common -ancestor is found before we give up entirely. - -Once the 'done' line is read from the client, the server will either -send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object -name of the last commit determined to be common. The server only sends -ACK after 'done' if there is at least one common base and multi_ack or -multi_ack_detailed is enabled. The server always sends NAK after 'done' -if there is no common base found. - -Then the server will start sending its packfile data. 
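The formal grammar for these responses follows below; on the client side the rules reduce to a small classifier. A sketch (classifyAck is a hypothetical name):

----
package main

import (
	"fmt"
	"strings"
)

// classifyAck interprets one server-response payload during negotiation.
func classifyAck(line string) string {
	line = strings.TrimSuffix(line, "\n")
	switch {
	case line == "NAK":
		return "no common base found yet"
	case strings.HasSuffix(line, " continue"): // multi_ack
		return "common commit, keep sending haves"
	case strings.HasSuffix(line, " common"), strings.HasSuffix(line, " ready"): // multi_ack_detailed
		return "common commit identified / server ready to send the pack"
	case strings.HasPrefix(line, "ACK "):
		return "final ACK, packfile follows"
	default:
		return "unexpected payload"
	}
}

func main() {
	fmt.Println(classifyAck("ACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n"))
	fmt.Println(classifyAck("NAK\n"))
}
----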
- ----- - server-response = *ack_multi ack / nak - ack_multi = PKT-LINE("ACK" SP obj-id ack_status) - ack_status = "continue" / "common" / "ready" - ack = PKT-LINE("ACK" SP obj-id) - nak = PKT-LINE("NAK") ----- - -A simple clone may look like this (with no 'have' lines): - ----- - C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ - side-band-64k ofs-delta\n - C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n - C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n - C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n - C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n - C: 0000 - C: 0009done\n - - S: 0008NAK\n - S: [PACKFILE] ----- - -An incremental update (fetch) response might look like this: - ----- - C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ - side-band-64k ofs-delta\n - C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n - C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n - C: 0000 - C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n - C: [30 more have lines] - C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n - C: 0000 - - S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n - S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n - S: 0008NAK\n - - C: 0009done\n - - S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n - S: [PACKFILE] ----- - - -Packfile Data -------------- - -Now that the client and server have finished negotiation about what -the minimal amount of data that needs to be sent to the client is, the server -will construct and send the required data in packfile format. - -See pack-format.txt for what the packfile itself actually looks like. - -If 'side-band' or 'side-band-64k' capabilities have been specified by -the client, the server will send the packfile data multiplexed. - -Each packet starting with the packet-line length of the amount of data -that follows, followed by a single byte specifying the sideband the -following data is coming in on. - -In 'side-band' mode, it will send up to 999 data bytes plus 1 control -code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' -mode it will send up to 65519 data bytes plus 1 control code, for a -total of up to 65520 bytes in a pkt-line. - -The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain -packfile data, sideband '2' will be used for progress information that the -client will generally print to stderr and sideband '3' is used for error -information. - -If no 'side-band' capability was specified, the server will stream the -entire packfile without multiplexing. - - -Pushing Data To a Server ------------------------- - -Pushing data to a server will invoke the 'receive-pack' process on the -server, which will allow the client to tell it which references it should -update and then send all the data the server will need for those new -references to be complete. Once all the data is received and validated, -the server will then update its references to what the client specified. - -Authentication --------------- - -The protocol itself contains no authentication mechanisms. That is to be -handled by the transport, such as SSH, before the 'receive-pack' process is -invoked. If 'receive-pack' is configured over the Git transport, those -repositories will be writable by anyone who can access that port (9418) as -that transport is unauthenticated. - -Reference Discovery -------------------- - -The reference discovery phase is done nearly the same way as it is in the -fetching protocol. 
Each reference obj-id and name on the server is sent
-in packet-line format to the client, followed by a flush-pkt. The only
-real difference is that the capability listing is different - the only
-possible values are 'report-status', 'delete-refs', 'ofs-delta' and
-'push-options'.
-
-Reference Update Request and Packfile Transfer
-----------------------------------------------
-
-Once the client knows what references the server is at, it can send a
-list of reference update requests. For each reference on the server
-that it wants to update, it sends a line listing the obj-id currently on
-the server, the obj-id the client would like to update it to and the name
-of the reference.
-
-This list is followed by a flush-pkt. Then the push options are transmitted
-one per packet followed by another flush-pkt. After that the packfile that
-should contain all the objects that the server will need to complete the new
-references will be sent.
-
-----
-  update-request    =  *shallow ( command-list | push-cert ) [packfile]
-
-  shallow           =  PKT-LINE("shallow" SP obj-id)
-
-  command-list      =  PKT-LINE(command NUL capability-list)
-                       *PKT-LINE(command)
-                       flush-pkt
-
-  command           =  create / delete / update
-  create            =  zero-id SP new-id SP name
-  delete            =  old-id SP zero-id SP name
-  update            =  old-id SP new-id SP name
-
-  old-id            =  obj-id
-  new-id            =  obj-id
-
-  push-cert         =  PKT-LINE("push-cert" NUL capability-list LF)
-                       PKT-LINE("certificate version 0.1" LF)
-                       PKT-LINE("pusher" SP ident LF)
-                       PKT-LINE("pushee" SP url LF)
-                       PKT-LINE("nonce" SP nonce LF)
-                       PKT-LINE(LF)
-                       *PKT-LINE(command LF)
-                       *PKT-LINE(gpg-signature-lines LF)
-                       PKT-LINE("push-cert-end" LF)
-
-  packfile          =  "PACK" 28*(OCTET)
-----
-
-If the receiving end does not support delete-refs, the sending end MUST
-NOT ask for a delete command.
-
-If the receiving end does not support push-cert, the sending end
-MUST NOT send a push-cert command. When a push-cert command is
-sent, command-list MUST NOT be sent; the commands recorded in the
-push certificate are used instead.
-
-The packfile MUST NOT be sent if the only command used is 'delete'.
-
-A packfile MUST be sent if either create or update command is used,
-even if the server already has all the necessary objects. In this
-case the client MUST send an empty packfile. The only time this
-is likely to happen is if the client is creating
-a new branch or a tag that points to an existing obj-id.
-
-The server will receive the packfile, unpack it, then validate each
-reference that is being updated that it hasn't changed while the request
-was being processed (the obj-id is still the same as the old-id), and
-it will run any update hooks to make sure that the update is acceptable.
-If all of that is fine, the server will then update the references.
-
-Push Certificate
-----------------
-
-A push certificate begins with a set of header lines. After the
-header and an empty line, the protocol commands follow, one per
-line. Note that the trailing LF in push-cert PKT-LINEs is _not_
-optional; it must be present.
-
-Currently, the following header fields are defined:
-
-`pusher` ident::
-	Identify the GPG key in "Human Readable Name <email@address>"
-	format.
-
-`pushee` url::
-	The repository URL (anonymized, if the URL contains
-	authentication material) the user who ran `git push`
-	intended to push into.
-
-`nonce` nonce::
-	The 'nonce' string the receiving repository asked the
-	pushing user to include in the certificate, to prevent
-	replay attacks.
- -The GPG signature lines are a detached signature for the contents -recorded in the push certificate before the signature block begins. -The detached signature is used to certify that the commands were -given by the pusher, who must be the signer. - -Report Status -------------- - -After receiving the pack data from the sender, the receiver sends a -report if 'report-status' capability is in effect. -It is a short listing of what happened in that update. It will first -list the status of the packfile unpacking as either 'unpack ok' or -'unpack [error]'. Then it will list the status for each of the references -that it tried to update. Each line is either 'ok [refname]' if the -update was successful, or 'ng [refname] [error]' if the update was not. - ----- - report-status = unpack-status - 1*(command-status) - flush-pkt - - unpack-status = PKT-LINE("unpack" SP unpack-result) - unpack-result = "ok" / error-msg - - command-status = command-ok / command-fail - command-ok = PKT-LINE("ok" SP refname) - command-fail = PKT-LINE("ng" SP refname SP error-msg) - - error-msg = 1*(OCTECT) ; where not "ok" ----- - -Updates can be unsuccessful for a number of reasons. The reference can have -changed since the reference discovery phase was originally sent, meaning -someone pushed in the meantime. The reference being pushed could be a -non-fast-forward reference and the update hooks or configuration could be -set to not allow that, etc. Also, some references can be updated while others -can be rejected. - -An example client/server communication might look like this: - ----- - S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n - S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n - S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n - S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n - S: 0000 - - C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n - C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n - C: 0000 - C: [PACKDATA] - - S: 000eunpack ok\n - S: 0018ok refs/heads/debug\n - S: 002ang refs/heads/master non-fast-forward\n ----- -*/ diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go deleted file mode 100644 index e2a0a108b24..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go +++ /dev/null @@ -1,165 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ( - ok = "ok" -) - -// ReportStatus is a report status message, as used in the git-receive-pack -// process whenever the 'report-status' capability is negotiated. -type ReportStatus struct { - UnpackStatus string - CommandStatuses []*CommandStatus -} - -// NewReportStatus creates a new ReportStatus message. -func NewReportStatus() *ReportStatus { - return &ReportStatus{} -} - -// Error returns the first error if any. -func (s *ReportStatus) Error() error { - if s.UnpackStatus != ok { - return fmt.Errorf("unpack error: %s", s.UnpackStatus) - } - - for _, s := range s.CommandStatuses { - if err := s.Error(); err != nil { - return err - } - } - - return nil -} - -// Encode writes the report status to a writer. 
-func (s *ReportStatus) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { - return err - } - - for _, cs := range s.CommandStatuses { - if err := cs.encode(w); err != nil { - return err - } - } - - return e.Flush() -} - -// Decode reads from the given reader and decodes a report-status message. It -// does not read more input than what is needed to fill the report status. -func (s *ReportStatus) Decode(r io.Reader) error { - scan := pktline.NewScanner(r) - if err := s.scanFirstLine(scan); err != nil { - return err - } - - if err := s.decodeReportStatus(scan.Bytes()); err != nil { - return err - } - - flushed := false - for scan.Scan() { - b := scan.Bytes() - if isFlush(b) { - flushed = true - break - } - - if err := s.decodeCommandStatus(b); err != nil { - return err - } - } - - if !flushed { - return fmt.Errorf("missing flush") - } - - return scan.Err() -} - -func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { - if scan.Scan() { - return nil - } - - if scan.Err() != nil { - return scan.Err() - } - - return io.ErrUnexpectedEOF -} - -func (s *ReportStatus) decodeReportStatus(b []byte) error { - if isFlush(b) { - return fmt.Errorf("premature flush") - } - - b = bytes.TrimSuffix(b, eol) - - line := string(b) - fields := strings.SplitN(line, " ", 2) - if len(fields) != 2 || fields[0] != "unpack" { - return fmt.Errorf("malformed unpack status: %s", line) - } - - s.UnpackStatus = fields[1] - return nil -} - -func (s *ReportStatus) decodeCommandStatus(b []byte) error { - b = bytes.TrimSuffix(b, eol) - - line := string(b) - fields := strings.SplitN(line, " ", 3) - status := ok - if len(fields) == 3 && fields[0] == "ng" { - status = fields[2] - } else if len(fields) != 2 || fields[0] != "ok" { - return fmt.Errorf("malformed command status: %s", line) - } - - cs := &CommandStatus{ - ReferenceName: plumbing.ReferenceName(fields[1]), - Status: status, - } - s.CommandStatuses = append(s.CommandStatuses, cs) - return nil -} - -// CommandStatus is the status of a reference in a report status. -// See ReportStatus struct. -type CommandStatus struct { - ReferenceName plumbing.ReferenceName - Status string -} - -// Error returns the error, if any. 
-func (s *CommandStatus) Error() error { - if s.Status == ok { - return nil - } - - return fmt.Errorf("command error on %s: %s", - s.ReferenceName.String(), s.Status) -} - -func (s *CommandStatus) encode(w io.Writer) error { - e := pktline.NewEncoder(w) - if s.Error() == nil { - return e.Encodef("ok %s\n", s.ReferenceName.String()) - } - - return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go deleted file mode 100644 index fe4fe688795..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go +++ /dev/null @@ -1,92 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ( - shallowLineLen = 48 - unshallowLineLen = 50 -) - -type ShallowUpdate struct { - Shallows []plumbing.Hash - Unshallows []plumbing.Hash -} - -func (r *ShallowUpdate) Decode(reader io.Reader) error { - s := pktline.NewScanner(reader) - - for s.Scan() { - line := s.Bytes() - line = bytes.TrimSpace(line) - - var err error - switch { - case bytes.HasPrefix(line, shallow): - err = r.decodeShallowLine(line) - case bytes.HasPrefix(line, unshallow): - err = r.decodeUnshallowLine(line) - case bytes.Equal(line, pktline.Flush): - return nil - } - - if err != nil { - return err - } - } - - return s.Err() -} - -func (r *ShallowUpdate) decodeShallowLine(line []byte) error { - hash, err := r.decodeLine(line, shallow, shallowLineLen) - if err != nil { - return err - } - - r.Shallows = append(r.Shallows, hash) - return nil -} - -func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error { - hash, err := r.decodeLine(line, unshallow, unshallowLineLen) - if err != nil { - return err - } - - r.Unshallows = append(r.Unshallows, hash) - return nil -} - -func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) { - if len(line) != expLen { - return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line) - } - - raw := string(line[expLen-40 : expLen]) - return plumbing.NewHash(raw), nil -} - -func (r *ShallowUpdate) Encode(w io.Writer) error { - e := pktline.NewEncoder(w) - - for _, h := range r.Shallows { - if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { - return err - } - } - - for _, h := range r.Unshallows { - if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { - return err - } - } - - return e.Flush() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go deleted file mode 100644 index de5001281fd..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go +++ /dev/null @@ -1,33 +0,0 @@ -package sideband - -// Type sideband type "side-band" or "side-band-64k" -type Type int8 - -const ( - // Sideband legacy sideband type up to 1000-byte messages - Sideband Type = iota - // Sideband64k sideband type up to 65519-byte messages - Sideband64k Type = iota - - // MaxPackedSize for Sideband type - MaxPackedSize = 1000 - // MaxPackedSize64k for Sideband64k type - MaxPackedSize64k = 65520 -) - -// Channel sideband channel -type Channel byte - -// WithPayload encode the payload as a message -func (ch Channel) WithPayload(payload []byte) []byte { - return 
append([]byte{byte(ch)}, payload...) -} - -const ( - // PackData packfile content - PackData Channel = 1 - // ProgressMessage progress messages - ProgressMessage Channel = 2 - // ErrorMessage fatal error message just before stream aborts - ErrorMessage Channel = 3 -) diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go deleted file mode 100644 index 0116f962ef2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go +++ /dev/null @@ -1,148 +0,0 @@ -package sideband - -import ( - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded -var ErrMaxPackedExceeded = errors.New("max. packed size exceeded") - -// Progress where the progress information is stored -type Progress interface { - io.Writer -} - -// Demuxer demultiplexes the progress reports and error info interleaved with the -// packfile itself. -// -// A sideband has three different channels the main one, called PackData, contains -// the packfile data; the ErrorMessage channel, that contains server errors; and -// the last one, ProgressMessage channel, containing information about the ongoing -// task happening in the server (optional, can be suppressed sending NoProgress -// or Quiet capabilities to the server) -// -// In order to demultiplex the data stream, method `Read` should be called to -// retrieve the PackData channel, the incoming data from the ProgressMessage is -// written at `Progress` (if any), if any message is retrieved from the -// ErrorMessage channel an error is returned and we can assume that the -// connection has been closed. -type Demuxer struct { - t Type - r io.Reader - s *pktline.Scanner - - max int - pending []byte - - // Progress is where the progress messages are stored - Progress Progress -} - -// NewDemuxer returns a new Demuxer for the given t and read from r -func NewDemuxer(t Type, r io.Reader) *Demuxer { - max := MaxPackedSize64k - if t == Sideband { - max = MaxPackedSize - } - - return &Demuxer{ - t: t, - r: r, - max: max, - s: pktline.NewScanner(r), - } -} - -// Read reads up to len(p) bytes from the PackData channel into p, an error can -// be return if an error happens when reading or if a message is sent in the -// ErrorMessage channel. 
-// -// When a ProgressMessage is read, is not copy to b, instead of this is written -// to the Progress -func (d *Demuxer) Read(b []byte) (n int, err error) { - var read, req int - - req = len(b) - for read < req { - n, err := d.doRead(b[read:req]) - read += n - - if err != nil { - return read, err - } - } - - return read, nil -} - -func (d *Demuxer) doRead(b []byte) (int, error) { - read, err := d.nextPackData() - size := len(read) - wanted := len(b) - - if size > wanted { - d.pending = read[wanted:] - } - - if wanted > size { - wanted = size - } - - size = copy(b, read[:wanted]) - return size, err -} - -func (d *Demuxer) nextPackData() ([]byte, error) { - content := d.getPending() - if len(content) != 0 { - return content, nil - } - - if !d.s.Scan() { - if err := d.s.Err(); err != nil { - return nil, err - } - - return nil, io.EOF - } - - content = d.s.Bytes() - - size := len(content) - if size == 0 { - return nil, nil - } else if size > d.max { - return nil, ErrMaxPackedExceeded - } - - switch Channel(content[0]) { - case PackData: - return content[1:], nil - case ProgressMessage: - if d.Progress != nil { - _, err := d.Progress.Write(content[1:]) - return nil, err - } - case ErrorMessage: - return nil, fmt.Errorf("unexpected error: %s", content[1:]) - default: - return nil, fmt.Errorf("unknown channel %s", content) - } - - return nil, nil -} - -func (d *Demuxer) getPending() (b []byte) { - if len(d.pending) == 0 { - return nil - } - - content := d.pending - d.pending = nil - - return content -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go deleted file mode 100644 index c5d24295291..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package sideband implements a sideband mutiplex/demultiplexer -package sideband - -// If 'side-band' or 'side-band-64k' capabilities have been specified by -// the client, the server will send the packfile data multiplexed. -// -// Either mode indicates that the packfile data will be streamed broken -// up into packets of up to either 1000 bytes in the case of 'side_band', -// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up -// of a leading 4-byte pkt-line length of how much data is in the packet, -// followed by a 1-byte stream code, followed by the actual data. -// -// The stream code can be one of: -// -// 1 - pack data -// 2 - progress messages -// 3 - fatal error message just before stream aborts -// -// The "side-band-64k" capability came about as a way for newer clients -// that can handle much larger packets to request packets that are -// actually crammed nearly full, while maintaining backward compatibility -// for the older clients. -// -// Further, with side-band and its up to 1000-byte messages, it's actually -// 999 bytes of payload and 1 byte for the stream code. With side-band-64k, -// same deal, you have up to 65519 bytes of data and 1 byte for the stream -// code. -// -// The client MUST send only maximum of one of "side-band" and "side- -// band-64k". Server MUST diagnose it as an error if client requests -// both. 
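For reviewers of this deletion: the doc comment above describes the wire protocol that the removed Muxer and Demuxer implement. As a point of reference, here is a minimal sketch of how the two ends pair up, using only the APIs visible in the deleted files in this diff; the in-memory buffer and the sample payloads are illustrative, not part of the deleted code.

package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband"
)

func main() {
	var stream bytes.Buffer

	// Server side: interleave a progress message and pack data on one
	// pkt-line stream, on channel 2 and channel 1 respectively.
	mux := sideband.NewMuxer(sideband.Sideband64k, &stream)
	if _, err := mux.WriteChannel(sideband.ProgressMessage, []byte("Counting objects: 42\n")); err != nil {
		panic(err)
	}
	if _, err := mux.Write([]byte("PACK....")); err != nil { // PackData channel
		panic(err)
	}

	// Client side: Read yields only channel 1; channel 2 is copied to
	// the Progress writer, and channel 3 surfaces as an error from Read.
	demux := sideband.NewDemuxer(sideband.Sideband64k, &stream)
	demux.Progress = os.Stderr

	pack := make([]byte, 8)
	if _, err := demux.Read(pack); err != nil {
		panic(err)
	}
	fmt.Printf("pack bytes: %q\n", pack)
}

Note that the ErrorMessage channel is treated as fatal by the deleted Demuxer: its payload is returned as an error from Read, matching the doc comment's "fatal error message just before stream aborts".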
diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go deleted file mode 100644 index d51ac826952..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go +++ /dev/null @@ -1,65 +0,0 @@ -package sideband - -import ( - "io" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Muxer multiplex the packfile along with the progress messages and the error -// information. The multiplex is perform using pktline format. -type Muxer struct { - max int - e *pktline.Encoder -} - -const chLen = 1 - -// NewMuxer returns a new Muxer for the given t that writes on w. -// -// If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any -// other value is given, max pack is set to MaxPackedSize64k, that is the -// maximum length of a line in pktline format. -func NewMuxer(t Type, w io.Writer) *Muxer { - max := MaxPackedSize64k - if t == Sideband { - max = MaxPackedSize - } - - return &Muxer{ - max: max - chLen, - e: pktline.NewEncoder(w), - } -} - -// Write writes p in the PackData channel -func (m *Muxer) Write(p []byte) (int, error) { - return m.WriteChannel(PackData, p) -} - -// WriteChannel writes p in the given channel. This method can be used with any -// channel, but is recommend use it only for the ProgressMessage and -// ErrorMessage channels and use Write for the PackData channel -func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) { - wrote := 0 - size := len(p) - for wrote < size { - n, err := m.doWrite(t, p[wrote:]) - wrote += n - - if err != nil { - return wrote, err - } - } - - return wrote, nil -} - -func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { - sz := len(p) - if sz > m.max { - sz = m.max - } - - return sz, m.e.Encode(ch.WithPayload(p[:sz])) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go deleted file mode 100644 index b3a7ee804c5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go +++ /dev/null @@ -1,127 +0,0 @@ -package packp - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -const ackLineLen = 44 - -// ServerResponse object acknowledgement from upload-pack service -type ServerResponse struct { - ACKs []plumbing.Hash -} - -// Decode decodes the response into the struct, isMultiACK should be true, if -// the request was done with multi_ack or multi_ack_detailed capabilities. -func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error { - // TODO: implement support for multi_ack or multi_ack_detailed responses - if isMultiACK { - return errors.New("multi_ack and multi_ack_detailed are not supported") - } - - s := pktline.NewScanner(reader) - - for s.Scan() { - line := s.Bytes() - - if err := r.decodeLine(line); err != nil { - return err - } - - // we need to detect when the end of a response header and the beginning - // of a packfile header happened, some requests to the git daemon - // produces a duplicate ACK header even when multi_ack is not supported. 
- stop, err := r.stopReading(reader) - if err != nil { - return err - } - - if stop { - break - } - } - - return s.Err() -} - -// stopReading detects when a valid command such as ACK or NAK is found to be -// read in the buffer without moving the read pointer. -func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) { - ahead, err := reader.Peek(7) - if err == io.EOF { - return true, nil - } - - if err != nil { - return false, err - } - - if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) { - return false, nil - } - - if len(ahead) == 7 && r.isValidCommand(ahead[4:]) { - return false, nil - } - - return true, nil -} - -func (r *ServerResponse) isValidCommand(b []byte) bool { - commands := [][]byte{ack, nak} - for _, c := range commands { - if bytes.Equal(b, c) { - return true - } - } - - return false -} - -func (r *ServerResponse) decodeLine(line []byte) error { - if len(line) == 0 { - return fmt.Errorf("unexpected flush") - } - - if bytes.Equal(line[0:3], ack) { - return r.decodeACKLine(line) - } - - if bytes.Equal(line[0:3], nak) { - return nil - } - - return fmt.Errorf("unexpected content %q", string(line)) -} - -func (r *ServerResponse) decodeACKLine(line []byte) error { - if len(line) < ackLineLen { - return fmt.Errorf("malformed ACK %q", line) - } - - sp := bytes.Index(line, []byte(" ")) - h := plumbing.NewHash(string(line[sp+1 : sp+41])) - r.ACKs = append(r.ACKs, h) - return nil -} - -// Encode encodes the ServerResponse into a writer. -func (r *ServerResponse) Encode(w io.Writer) error { - if len(r.ACKs) > 1 { - return errors.New("multi_ack and multi_ack_detailed are not supported") - } - - e := pktline.NewEncoder(w) - if len(r.ACKs) == 0 { - return e.Encodef("%s\n", nak) - } - - return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go deleted file mode 100644 index ddec06e9965..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go +++ /dev/null @@ -1,168 +0,0 @@ -package packp - -import ( - "fmt" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// UploadRequest values represent the information transmitted on a -// upload-request message. Values from this type are not zero-value -// safe, use the New function instead. -// This is a low level type, use UploadPackRequest instead. -type UploadRequest struct { - Capabilities *capability.List - Wants []plumbing.Hash - Shallows []plumbing.Hash - Depth Depth -} - -// Depth values stores the desired depth of the requested packfile: see -// DepthCommit, DepthSince and DepthReference. -type Depth interface { - isDepth() - IsZero() bool -} - -// DepthCommits values stores the maximum number of requested commits in -// the packfile. Zero means infinite. A negative value will have -// undefined consequences. -type DepthCommits int - -func (d DepthCommits) isDepth() {} - -func (d DepthCommits) IsZero() bool { - return d == 0 -} - -// DepthSince values requests only commits newer than the specified time. -type DepthSince time.Time - -func (d DepthSince) isDepth() {} - -func (d DepthSince) IsZero() bool { - return time.Time(d).IsZero() -} - -// DepthReference requests only commits not to found in the specified reference. 
-type DepthReference string - -func (d DepthReference) isDepth() {} - -func (d DepthReference) IsZero() bool { - return string(d) == "" -} - -// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be -// used. It has no capabilities, wants or shallows and an infinite depth. Please -// note that to encode an upload-request it has to have at least one wanted hash. -func NewUploadRequest() *UploadRequest { - return &UploadRequest{ - Capabilities: capability.NewList(), - Wants: []plumbing.Hash{}, - Shallows: []plumbing.Hash{}, - Depth: DepthCommits(0), - } -} - -// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest -// value, the request capabilities are filled with the most optimal ones, based -// on the adv value (advertised capabilities), the UploadRequest generated it -// has no wants or shallows and an infinite depth. -func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { - r := NewUploadRequest() - - if adv.Supports(capability.MultiACKDetailed) { - r.Capabilities.Set(capability.MultiACKDetailed) - } else if adv.Supports(capability.MultiACK) { - r.Capabilities.Set(capability.MultiACK) - } - - if adv.Supports(capability.Sideband64k) { - r.Capabilities.Set(capability.Sideband64k) - } else if adv.Supports(capability.Sideband) { - r.Capabilities.Set(capability.Sideband) - } - - if adv.Supports(capability.ThinPack) { - r.Capabilities.Set(capability.ThinPack) - } - - if adv.Supports(capability.OFSDelta) { - r.Capabilities.Set(capability.OFSDelta) - } - - if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) - } - - return r -} - -// Validate validates the content of UploadRequest, following the next rules: -// - Wants MUST have at least one reference -// - capability.Shallow MUST be present if Shallows is not empty -// - is a non-zero DepthCommits is given capability.Shallow MUST be present -// - is a DepthSince is given capability.Shallow MUST be present -// - is a DepthReference is given capability.DeepenNot MUST be present -// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k -// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed -func (req *UploadRequest) Validate() error { - if len(req.Wants) == 0 { - return fmt.Errorf("want can't be empty") - } - - if err := req.validateRequiredCapabilities(); err != nil { - return err - } - - if err := req.validateConflictCapabilities(); err != nil { - return err - } - - return nil -} - -func (req *UploadRequest) validateRequiredCapabilities() error { - msg := "missing capability %s" - - if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) { - return fmt.Errorf(msg, capability.Shallow) - } - - switch req.Depth.(type) { - case DepthCommits: - if req.Depth != DepthCommits(0) { - if !req.Capabilities.Supports(capability.Shallow) { - return fmt.Errorf(msg, capability.Shallow) - } - } - case DepthSince: - if !req.Capabilities.Supports(capability.DeepenSince) { - return fmt.Errorf(msg, capability.DeepenSince) - } - case DepthReference: - if !req.Capabilities.Supports(capability.DeepenNot) { - return fmt.Errorf(msg, capability.DeepenNot) - } - } - - return nil -} - -func (req *UploadRequest) validateConflictCapabilities() error { - msg := "capabilities %s and %s are mutually exclusive" - if req.Capabilities.Supports(capability.Sideband) && - req.Capabilities.Supports(capability.Sideband64k) { - return fmt.Errorf(msg, capability.Sideband, 
capability.Sideband64k) - } - - if req.Capabilities.Supports(capability.MultiACK) && - req.Capabilities.Supports(capability.MultiACKDetailed) { - return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go deleted file mode 100644 index 895a3bf6dd2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go +++ /dev/null @@ -1,257 +0,0 @@ -package packp - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "strconv" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Decode reads the next upload-request form its input and -// stores it in the UploadRequest. -func (req *UploadRequest) Decode(r io.Reader) error { - d := newUlReqDecoder(r) - return d.Decode(req) -} - -type ulReqDecoder struct { - s *pktline.Scanner // a pkt-line scanner from the input stream - line []byte // current pkt-line contents, use parser.nextLine() to make it advance - nLine int // current pkt-line number for debugging, begins at 1 - err error // sticky error, use the parser.error() method to fill this out - data *UploadRequest // parsed data is stored here -} - -func newUlReqDecoder(r io.Reader) *ulReqDecoder { - return &ulReqDecoder{ - s: pktline.NewScanner(r), - } -} - -func (d *ulReqDecoder) Decode(v *UploadRequest) error { - d.data = v - - for state := d.decodeFirstWant; state != nil; { - state = state() - } - - return d.err -} - -// fills out the parser stiky error -func (d *ulReqDecoder) error(format string, a ...interface{}) { - msg := fmt.Sprintf( - "pkt-line %d: %s", d.nLine, - fmt.Sprintf(format, a...), - ) - - d.err = NewErrUnexpectedData(msg, d.line) -} - -// Reads a new pkt-line from the scanner, makes its payload available as -// p.line and increments p.nLine. A successful invocation returns true, -// otherwise, false is returned and the sticky error is filled out -// accordingly. Trims eols at the end of the payloads. -func (d *ulReqDecoder) nextLine() bool { - d.nLine++ - - if !d.s.Scan() { - if d.err = d.s.Err(); d.err != nil { - return false - } - - d.error("EOF") - return false - } - - d.line = d.s.Bytes() - d.line = bytes.TrimSuffix(d.line, eol) - - return true -} - -// Expected format: want [ capabilities] -func (d *ulReqDecoder) decodeFirstWant() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if !bytes.HasPrefix(d.line, want) { - d.error("missing 'want ' prefix") - return nil - } - d.line = bytes.TrimPrefix(d.line, want) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Wants = append(d.data.Wants, hash) - - return d.decodeCaps -} - -func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) { - if len(d.line) < hashSize { - d.err = fmt.Errorf("malformed hash: %v", d.line) - return plumbing.ZeroHash, false - } - - var hash plumbing.Hash - if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil { - d.error("invalid hash text: %s", err) - return plumbing.ZeroHash, false - } - d.line = d.line[hashSize:] - - return hash, true -} - -// Expected format: sp cap1 sp cap2 sp cap3... 
-func (d *ulReqDecoder) decodeCaps() stateFn { - d.line = bytes.TrimPrefix(d.line, sp) - if err := d.data.Capabilities.Decode(d.line); err != nil { - d.error("invalid capabilities: %s", err) - } - - return d.decodeOtherWants -} - -// Expected format: want -func (d *ulReqDecoder) decodeOtherWants() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if bytes.HasPrefix(d.line, shallow) { - return d.decodeShallow - } - - if bytes.HasPrefix(d.line, deepen) { - return d.decodeDeepen - } - - if len(d.line) == 0 { - return nil - } - - if !bytes.HasPrefix(d.line, want) { - d.error("unexpected payload while expecting a want: %q", d.line) - return nil - } - d.line = bytes.TrimPrefix(d.line, want) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Wants = append(d.data.Wants, hash) - - return d.decodeOtherWants -} - -// Expected format: shallow -func (d *ulReqDecoder) decodeShallow() stateFn { - if bytes.HasPrefix(d.line, deepen) { - return d.decodeDeepen - } - - if len(d.line) == 0 { - return nil - } - - if !bytes.HasPrefix(d.line, shallow) { - d.error("unexpected payload while expecting a shallow: %q", d.line) - return nil - } - d.line = bytes.TrimPrefix(d.line, shallow) - - hash, ok := d.readHash() - if !ok { - return nil - } - d.data.Shallows = append(d.data.Shallows, hash) - - if ok := d.nextLine(); !ok { - return nil - } - - return d.decodeShallow -} - -// Expected format: deepen / deepen-since
    / deepen-not -func (d *ulReqDecoder) decodeDeepen() stateFn { - if bytes.HasPrefix(d.line, deepenCommits) { - return d.decodeDeepenCommits - } - - if bytes.HasPrefix(d.line, deepenSince) { - return d.decodeDeepenSince - } - - if bytes.HasPrefix(d.line, deepenReference) { - return d.decodeDeepenReference - } - - if len(d.line) == 0 { - return nil - } - - d.error("unexpected deepen specification: %q", d.line) - return nil -} - -func (d *ulReqDecoder) decodeDeepenCommits() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenCommits) - - var n int - if n, d.err = strconv.Atoi(string(d.line)); d.err != nil { - return nil - } - if n < 0 { - d.err = fmt.Errorf("negative depth") - return nil - } - d.data.Depth = DepthCommits(n) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeDeepenSince() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenSince) - - var secs int64 - secs, d.err = strconv.ParseInt(string(d.line), 10, 64) - if d.err != nil { - return nil - } - t := time.Unix(secs, 0).UTC() - d.data.Depth = DepthSince(t) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeDeepenReference() stateFn { - d.line = bytes.TrimPrefix(d.line, deepenReference) - - d.data.Depth = DepthReference(string(d.line)) - - return d.decodeFlush -} - -func (d *ulReqDecoder) decodeFlush() stateFn { - if ok := d.nextLine(); !ok { - return nil - } - - if len(d.line) != 0 { - d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go deleted file mode 100644 index c451e231640..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go +++ /dev/null @@ -1,145 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -// Encode writes the UlReq encoding of u to the stream. -// -// All the payloads will end with a newline character. Wants and -// shallows are sorted alphabetically. A depth of 0 means no depth -// request is sent. 
-func (req *UploadRequest) Encode(w io.Writer) error { - e := newUlReqEncoder(w) - return e.Encode(req) -} - -type ulReqEncoder struct { - pe *pktline.Encoder // where to write the encoded data - data *UploadRequest // the data to encode - err error // sticky error -} - -func newUlReqEncoder(w io.Writer) *ulReqEncoder { - return &ulReqEncoder{ - pe: pktline.NewEncoder(w), - } -} - -func (e *ulReqEncoder) Encode(v *UploadRequest) error { - e.data = v - - if len(v.Wants) == 0 { - return fmt.Errorf("empty wants provided") - } - - plumbing.HashesSort(e.data.Wants) - for state := e.encodeFirstWant; state != nil; { - state = state() - } - - return e.err -} - -func (e *ulReqEncoder) encodeFirstWant() stateFn { - var err error - if e.data.Capabilities.IsEmpty() { - err = e.pe.Encodef("want %s\n", e.data.Wants[0]) - } else { - err = e.pe.Encodef( - "want %s %s\n", - e.data.Wants[0], - e.data.Capabilities.String(), - ) - } - - if err != nil { - e.err = fmt.Errorf("encoding first want line: %s", err) - return nil - } - - return e.encodeAdditionalWants -} - -func (e *ulReqEncoder) encodeAdditionalWants() stateFn { - last := e.data.Wants[0] - for _, w := range e.data.Wants[1:] { - if bytes.Equal(last[:], w[:]) { - continue - } - - if err := e.pe.Encodef("want %s\n", w); err != nil { - e.err = fmt.Errorf("encoding want %q: %s", w, err) - return nil - } - - last = w - } - - return e.encodeShallows -} - -func (e *ulReqEncoder) encodeShallows() stateFn { - plumbing.HashesSort(e.data.Shallows) - - var last plumbing.Hash - for _, s := range e.data.Shallows { - if bytes.Equal(last[:], s[:]) { - continue - } - - if err := e.pe.Encodef("shallow %s\n", s); err != nil { - e.err = fmt.Errorf("encoding shallow %q: %s", s, err) - return nil - } - - last = s - } - - return e.encodeDepth -} - -func (e *ulReqEncoder) encodeDepth() stateFn { - switch depth := e.data.Depth.(type) { - case DepthCommits: - if depth != 0 { - commits := int(depth) - if err := e.pe.Encodef("deepen %d\n", commits); err != nil { - e.err = fmt.Errorf("encoding depth %d: %s", depth, err) - return nil - } - } - case DepthSince: - when := time.Time(depth).UTC() - if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil { - e.err = fmt.Errorf("encoding depth %s: %s", when, err) - return nil - } - case DepthReference: - reference := string(depth) - if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { - e.err = fmt.Errorf("encoding depth %s: %s", reference, err) - return nil - } - default: - e.err = fmt.Errorf("unsupported depth type") - return nil - } - - return e.encodeFlush -} - -func (e *ulReqEncoder) encodeFlush() stateFn { - if err := e.pe.Flush(); err != nil { - e.err = fmt.Errorf("encoding flush-pkt: %s", err) - return nil - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go deleted file mode 100644 index 4d927d8b8e0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go +++ /dev/null @@ -1,122 +0,0 @@ -package packp - -import ( - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" -) - -var ( - ErrEmptyCommands = errors.New("commands cannot be empty") - ErrMalformedCommand = errors.New("malformed command") -) - -// ReferenceUpdateRequest values represent reference upload requests. 
-// Values from this type are not zero-value safe, use the New function instead. -type ReferenceUpdateRequest struct { - Capabilities *capability.List - Commands []*Command - Shallow *plumbing.Hash - // Packfile contains an optional packfile reader. - Packfile io.ReadCloser - - // Progress receives sideband progress messages from the server - Progress sideband.Progress -} - -// New returns a pointer to a new ReferenceUpdateRequest value. -func NewReferenceUpdateRequest() *ReferenceUpdateRequest { - return &ReferenceUpdateRequest{ - // TODO: Add support for push-cert - Capabilities: capability.NewList(), - Commands: nil, - } -} - -// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new -// ReferenceUpdateRequest value, the request capabilities are filled with the -// most optimal ones, based on the adv value (advertised capabilities), the -// ReferenceUpdateRequest contains no commands -// -// It does set the following capabilities: -// - agent -// - report-status -// - ofs-delta -// - ref-delta -// - delete-refs -// It leaves up to the user to add the following capabilities later: -// - atomic -// - ofs-delta -// - side-band -// - side-band-64k -// - quiet -// - push-cert -func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest { - r := NewReferenceUpdateRequest() - - if adv.Supports(capability.Agent) { - r.Capabilities.Set(capability.Agent, capability.DefaultAgent) - } - - if adv.Supports(capability.ReportStatus) { - r.Capabilities.Set(capability.ReportStatus) - } - - return r -} - -func (req *ReferenceUpdateRequest) validate() error { - if len(req.Commands) == 0 { - return ErrEmptyCommands - } - - for _, c := range req.Commands { - if err := c.validate(); err != nil { - return err - } - } - - return nil -} - -type Action string - -const ( - Create Action = "create" - Update = "update" - Delete = "delete" - Invalid = "invalid" -) - -type Command struct { - Name plumbing.ReferenceName - Old plumbing.Hash - New plumbing.Hash -} - -func (c *Command) Action() Action { - if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash { - return Invalid - } - - if c.Old == plumbing.ZeroHash { - return Create - } - - if c.New == plumbing.ZeroHash { - return Delete - } - - return Update -} - -func (c *Command) validate() error { - if c.Action() == Invalid { - return ErrMalformedCommand - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go deleted file mode 100644 index 2c9843a5661..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go +++ /dev/null @@ -1,250 +0,0 @@ -package packp - -import ( - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" -) - -var ( - shallowLineLength = len(shallow) + hashSize - minCommandLength = hashSize*2 + 2 + 1 - minCommandAndCapsLength = minCommandLength + 1 -) - -var ( - ErrEmpty = errors.New("empty update-request message") - errNoCommands = errors.New("unexpected EOF before any command") - errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found") -) - -func errMalformedRequest(reason string) error { - return fmt.Errorf("malformed request: %s", reason) -} - -func errInvalidHashSize(got int) error { - return fmt.Errorf("invalid hash size: expected %d, got %d", - hashSize, got) -} - -func 
errInvalidHash(err error) error { - return fmt.Errorf("invalid hash: %s", err.Error()) -} - -func errInvalidShallowLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid shallow line length: expected %d, got %d", - shallowLineLength, got)) -} - -func errInvalidCommandCapabilitiesLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid command and capabilities line length: expected at least %d, got %d", - minCommandAndCapsLength, got)) -} - -func errInvalidCommandLineLength(got int) error { - return errMalformedRequest(fmt.Sprintf( - "invalid command line length: expected at least %d, got %d", - minCommandLength, got)) -} - -func errInvalidShallowObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid shallow object id: %s", err.Error())) -} - -func errInvalidOldObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid old object id: %s", err.Error())) -} - -func errInvalidNewObjId(err error) error { - return errMalformedRequest( - fmt.Sprintf("invalid new object id: %s", err.Error())) -} - -func errMalformedCommand(err error) error { - return errMalformedRequest(fmt.Sprintf( - "malformed command: %s", err.Error())) -} - -// Decode reads the next update-request message form the reader and wr -func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { - var rc io.ReadCloser - var ok bool - rc, ok = r.(io.ReadCloser) - if !ok { - rc = ioutil.NopCloser(r) - } - - d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} - return d.Decode(req) -} - -type updReqDecoder struct { - r io.ReadCloser - s *pktline.Scanner - req *ReferenceUpdateRequest -} - -func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { - d.req = req - funcs := []func() error{ - d.scanLine, - d.decodeShallow, - d.decodeCommandAndCapabilities, - d.decodeCommands, - d.setPackfile, - req.validate, - } - - for _, f := range funcs { - if err := f(); err != nil { - return err - } - } - - return nil -} - -func (d *updReqDecoder) scanLine() error { - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(ErrEmpty) - } - - return nil -} - -func (d *updReqDecoder) decodeShallow() error { - b := d.s.Bytes() - - if !bytes.HasPrefix(b, shallowNoSp) { - return nil - } - - if len(b) != shallowLineLength { - return errInvalidShallowLineLength(len(b)) - } - - h, err := parseHash(string(b[len(shallow):])) - if err != nil { - return errInvalidShallowObjId(err) - } - - if ok := d.s.Scan(); !ok { - return d.scanErrorOr(errNoCommands) - } - - d.req.Shallow = &h - - return nil -} - -func (d *updReqDecoder) decodeCommands() error { - for { - b := d.s.Bytes() - if bytes.Equal(b, pktline.Flush) { - return nil - } - - c, err := parseCommand(b) - if err != nil { - return err - } - - d.req.Commands = append(d.req.Commands, c) - - if ok := d.s.Scan(); !ok { - return d.s.Err() - } - } -} - -func (d *updReqDecoder) decodeCommandAndCapabilities() error { - b := d.s.Bytes() - i := bytes.IndexByte(b, 0) - if i == -1 { - return errMissingCapabilitiesDelimiter - } - - if len(b) < minCommandAndCapsLength { - return errInvalidCommandCapabilitiesLineLength(len(b)) - } - - cmd, err := parseCommand(b[:i]) - if err != nil { - return err - } - - d.req.Commands = append(d.req.Commands, cmd) - - if err := d.req.Capabilities.Decode(b[i+1:]); err != nil { - return err - } - - if err := d.scanLine(); err != nil { - return err - } - - return nil -} - -func (d *updReqDecoder) setPackfile() error { - d.req.Packfile = d.r - - return nil -} - -func parseCommand(b []byte) (*Command, 
error) { - if len(b) < minCommandLength { - return nil, errInvalidCommandLineLength(len(b)) - } - - var ( - os, ns string - n plumbing.ReferenceName - ) - if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil { - return nil, errMalformedCommand(err) - } - - oh, err := parseHash(os) - if err != nil { - return nil, errInvalidOldObjId(err) - } - - nh, err := parseHash(ns) - if err != nil { - return nil, errInvalidNewObjId(err) - } - - return &Command{Old: oh, New: nh, Name: n}, nil -} - -func parseHash(s string) (plumbing.Hash, error) { - if len(s) != hashSize { - return plumbing.ZeroHash, errInvalidHashSize(len(s)) - } - - if _, err := hex.DecodeString(s); err != nil { - return plumbing.ZeroHash, errInvalidHash(err) - } - - h := plumbing.NewHash(s) - return h, nil -} - -func (d *updReqDecoder) scanErrorOr(origErr error) error { - if err := d.s.Err(); err != nil { - return err - } - - return origErr -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go deleted file mode 100644 index 2545e935e61..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go +++ /dev/null @@ -1,75 +0,0 @@ -package packp - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -var ( - zeroHashString = plumbing.ZeroHash.String() -) - -// Encode writes the ReferenceUpdateRequest encoding to the stream. -func (req *ReferenceUpdateRequest) Encode(w io.Writer) error { - if err := req.validate(); err != nil { - return err - } - - e := pktline.NewEncoder(w) - - if err := req.encodeShallow(e, req.Shallow); err != nil { - return err - } - - if err := req.encodeCommands(e, req.Commands, req.Capabilities); err != nil { - return err - } - - if req.Packfile != nil { - if _, err := io.Copy(w, req.Packfile); err != nil { - return err - } - - return req.Packfile.Close() - } - - return nil -} - -func (req *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, - h *plumbing.Hash) error { - - if h == nil { - return nil - } - - objId := []byte(h.String()) - return e.Encodef("%s%s", shallow, objId) -} - -func (req *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, - cmds []*Command, cap *capability.List) error { - - if err := e.Encodef("%s\x00%s", - formatCommand(cmds[0]), cap.String()); err != nil { - return err - } - - for _, cmd := range cmds[1:] { - if err := e.Encodef(formatCommand(cmd)); err != nil { - return err - } - } - - return e.Flush() -} - -func formatCommand(cmd *Command) string { - o := cmd.Old.String() - n := cmd.New.String() - return fmt.Sprintf("%s %s %s", o, n, cmd.Name) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go deleted file mode 100644 index de2206b3fc4..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go +++ /dev/null @@ -1,98 +0,0 @@ -package packp - -import ( - "bytes" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -// UploadPackRequest represents a upload-pack request. -// Zero-value is not safe, use NewUploadPackRequest instead. 
-type UploadPackRequest struct { - UploadRequest - UploadHaves -} - -// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer. -func NewUploadPackRequest() *UploadPackRequest { - ur := NewUploadRequest() - return &UploadPackRequest{ - UploadHaves: UploadHaves{}, - UploadRequest: *ur, - } -} - -// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and -// returns a pointer. The request capabilities are filled with the most optimal -// ones, based on the adv value (advertised capabilities), the UploadPackRequest -// it has no wants, haves or shallows and an infinite depth -func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest { - ur := NewUploadRequestFromCapabilities(adv) - return &UploadPackRequest{ - UploadHaves: UploadHaves{}, - UploadRequest: *ur, - } -} - -// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants -// length is zero -func (r *UploadPackRequest) IsEmpty() bool { - return isSubset(r.Wants, r.Haves) -} - -func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { - for _, h := range needle { - found := false - for _, oh := range haystack { - if h == oh { - found = true - break - } - } - - if !found { - return false - } - } - - return true -} - -// UploadHaves is a message to signal the references that a client has in a -// upload-pack. Do not use this directly. Use UploadPackRequest request instead. -type UploadHaves struct { - Haves []plumbing.Hash -} - -// Encode encodes the UploadHaves into the Writer. If flush is true, a flush -// command will be encoded at the end of the writer content. -func (u *UploadHaves) Encode(w io.Writer, flush bool) error { - e := pktline.NewEncoder(w) - - plumbing.HashesSort(u.Haves) - - var last plumbing.Hash - for _, have := range u.Haves { - if bytes.Equal(last[:], have[:]) { - continue - } - - if err := e.Encodef("have %s\n", have); err != nil { - return fmt.Errorf("sending haves for %q: %s", have, err) - } - - last = have - } - - if flush && len(u.Haves) != 0 { - if err := e.Flush(); err != nil { - return fmt.Errorf("sending flush-pkt after haves: %s", err) - } - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go deleted file mode 100644 index a9a7192ea56..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go +++ /dev/null @@ -1,109 +0,0 @@ -package packp - -import ( - "errors" - "io" - - "bufio" - - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ErrUploadPackResponseNotDecoded is returned if Read is called without -// decoding first -var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded") - -// UploadPackResponse contains all the information responded by the upload-pack -// service, the response implements io.ReadCloser that allows to read the -// packfile directly from it. -type UploadPackResponse struct { - ShallowUpdate - ServerResponse - - r io.ReadCloser - isShallow bool - isMultiACK bool - isOk bool -} - -// NewUploadPackResponse create a new UploadPackResponse instance, the request -// being responded by the response is required. 
-func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse { - isShallow := !req.Depth.IsZero() - isMultiACK := req.Capabilities.Supports(capability.MultiACK) || - req.Capabilities.Supports(capability.MultiACKDetailed) - - return &UploadPackResponse{ - isShallow: isShallow, - isMultiACK: isMultiACK, - } -} - -// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance, -// and sets its packfile reader. -func NewUploadPackResponseWithPackfile(req *UploadPackRequest, - pf io.ReadCloser) *UploadPackResponse { - - r := NewUploadPackResponse(req) - r.r = pf - return r -} - -// Decode decodes all the responses sent by upload-pack service into the struct -// and prepares it to read the packfile using the Read method -func (r *UploadPackResponse) Decode(reader io.ReadCloser) error { - buf := bufio.NewReader(reader) - - if r.isShallow { - if err := r.ShallowUpdate.Decode(buf); err != nil { - return err - } - } - - if err := r.ServerResponse.Decode(buf, r.isMultiACK); err != nil { - return err - } - - // now the reader is ready to read the packfile content - r.r = ioutil.NewReadCloser(buf, reader) - - return nil -} - -// Encode encodes an UploadPackResponse. -func (r *UploadPackResponse) Encode(w io.Writer) (err error) { - if r.isShallow { - if err := r.ShallowUpdate.Encode(w); err != nil { - return err - } - } - - if err := r.ServerResponse.Encode(w); err != nil { - return err - } - - defer ioutil.CheckClose(r.r, &err) - _, err = io.Copy(w, r.r) - return err -} - -// Read reads the packfile data, if the request was done with any Sideband -// capability the content read should be demultiplexed. If the methods wasn't -// called before the ErrUploadPackResponseNotDecoded will be return -func (r *UploadPackResponse) Read(p []byte) (int, error) { - if r.r == nil { - return 0, ErrUploadPackResponseNotDecoded - } - - return r.r.Read(p) -} - -// Close the underlying reader, if any -func (r *UploadPackResponse) Close() error { - if r.r == nil { - return nil - } - - return r.r.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/reference.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/reference.go deleted file mode 100644 index 08e908f1f37..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/reference.go +++ /dev/null @@ -1,209 +0,0 @@ -package plumbing - -import ( - "errors" - "fmt" - "strings" -) - -const ( - refPrefix = "refs/" - refHeadPrefix = refPrefix + "heads/" - refTagPrefix = refPrefix + "tags/" - refRemotePrefix = refPrefix + "remotes/" - refNotePrefix = refPrefix + "notes/" - symrefPrefix = "ref: " -) - -// RefRevParseRules are a set of rules to parse references into short names. -// These are the same rules as used by git in shorten_unambiguous_ref. 
-// See: https://github.com/git/git/blob/e0aaa1b6532cfce93d87af9bc813fb2e7a7ce9d7/refs.c#L417 -var RefRevParseRules = []string{ - "refs/%s", - "refs/tags/%s", - "refs/heads/%s", - "refs/remotes/%s", - "refs/remotes/%s/HEAD", -} - -var ( - ErrReferenceNotFound = errors.New("reference not found") -) - -// ReferenceType reference type's -type ReferenceType int8 - -const ( - InvalidReference ReferenceType = 0 - HashReference ReferenceType = 1 - SymbolicReference ReferenceType = 2 -) - -func (r ReferenceType) String() string { - switch r { - case InvalidReference: - return "invalid-reference" - case HashReference: - return "hash-reference" - case SymbolicReference: - return "symbolic-reference" - } - - return "" -} - -// ReferenceName reference name's -type ReferenceName string - -// NewBranchReferenceName returns a reference name describing a branch based on -// his short name. -func NewBranchReferenceName(name string) ReferenceName { - return ReferenceName(refHeadPrefix + name) -} - -// NewNoteReferenceName returns a reference name describing a note based on his -// short name. -func NewNoteReferenceName(name string) ReferenceName { - return ReferenceName(refNotePrefix + name) -} - -// NewRemoteReferenceName returns a reference name describing a remote branch -// based on his short name and the remote name. -func NewRemoteReferenceName(remote, name string) ReferenceName { - return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, name)) -} - -// NewRemoteHEADReferenceName returns a reference name describing a the HEAD -// branch of a remote. -func NewRemoteHEADReferenceName(remote string) ReferenceName { - return ReferenceName(refRemotePrefix + fmt.Sprintf("%s/%s", remote, HEAD)) -} - -// NewTagReferenceName returns a reference name describing a tag based on short -// his name. 
-func NewTagReferenceName(name string) ReferenceName { - return ReferenceName(refTagPrefix + name) -} - -// IsBranch check if a reference is a branch -func (r ReferenceName) IsBranch() bool { - return strings.HasPrefix(string(r), refHeadPrefix) -} - -// IsNote check if a reference is a note -func (r ReferenceName) IsNote() bool { - return strings.HasPrefix(string(r), refNotePrefix) -} - -// IsRemote check if a reference is a remote -func (r ReferenceName) IsRemote() bool { - return strings.HasPrefix(string(r), refRemotePrefix) -} - -// IsTag check if a reference is a tag -func (r ReferenceName) IsTag() bool { - return strings.HasPrefix(string(r), refTagPrefix) -} - -func (r ReferenceName) String() string { - return string(r) -} - -// Short returns the short name of a ReferenceName -func (r ReferenceName) Short() string { - s := string(r) - res := s - for _, format := range RefRevParseRules { - _, err := fmt.Sscanf(s, format, &res) - if err == nil { - continue - } - } - - return res -} - -const ( - HEAD ReferenceName = "HEAD" - Master ReferenceName = "refs/heads/master" -) - -// Reference is a representation of git reference -type Reference struct { - t ReferenceType - n ReferenceName - h Hash - target ReferenceName -} - -// NewReferenceFromStrings creates a reference from name and target as string, -// the resulting reference can be a SymbolicReference or a HashReference base -// on the target provided -func NewReferenceFromStrings(name, target string) *Reference { - n := ReferenceName(name) - - if strings.HasPrefix(target, symrefPrefix) { - target := ReferenceName(target[len(symrefPrefix):]) - return NewSymbolicReference(n, target) - } - - return NewHashReference(n, NewHash(target)) -} - -// NewSymbolicReference creates a new SymbolicReference reference -func NewSymbolicReference(n, target ReferenceName) *Reference { - return &Reference{ - t: SymbolicReference, - n: n, - target: target, - } -} - -// NewHashReference creates a new HashReference reference -func NewHashReference(n ReferenceName, h Hash) *Reference { - return &Reference{ - t: HashReference, - n: n, - h: h, - } -} - -// Type return the type of a reference -func (r *Reference) Type() ReferenceType { - return r.t -} - -// Name return the name of a reference -func (r *Reference) Name() ReferenceName { - return r.n -} - -// Hash return the hash of a hash reference -func (r *Reference) Hash() Hash { - return r.h -} - -// Target return the target of a symbolic reference -func (r *Reference) Target() ReferenceName { - return r.target -} - -// Strings dump a reference as a [2]string -func (r *Reference) Strings() [2]string { - var o [2]string - o[0] = r.Name().String() - - switch r.Type() { - case HashReference: - o[1] = r.Hash().String() - case SymbolicReference: - o[1] = symrefPrefix + r.Target().String() - } - - return o -} - -func (r *Reference) String() string { - s := r.Strings() - return fmt.Sprintf("%s %s", s[1], s[0]) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revision.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revision.go deleted file mode 100644 index 5f053b200c0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revision.go +++ /dev/null @@ -1,11 +0,0 @@ -package plumbing - -// Revision represents a git revision -// to get more details about git revisions -// please check git manual page : -// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html -type Revision string - -func (r Revision) String() string { - return string(r) -} diff --git 
a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go deleted file mode 100644 index b9109870f00..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package revlist provides support to access the ancestors of commits, in a -// similar way as the git-rev-list command. -package revlist - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -// Objects applies a complementary set. It gets all the hashes from all -// the reachable objects from the given objects. Ignore param are object hashes -// that we want to ignore on the result. All that objects must be accessible -// from the object storer. -func Objects( - s storer.EncodedObjectStorer, - objs, - ignore []plumbing.Hash, -) ([]plumbing.Hash, error) { - return ObjectsWithStorageForIgnores(s, s, objs, ignore) -} - -// ObjectsWithStorageForIgnores is the same as Objects, but a -// secondary storage layer can be provided, to be used to finding the -// full set of objects to be ignored while finding the reachable -// objects. This is useful when the main `s` storage layer is slow -// and/or remote, while the ignore list is available somewhere local. -func ObjectsWithStorageForIgnores( - s, ignoreStore storer.EncodedObjectStorer, - objs, - ignore []plumbing.Hash, -) ([]plumbing.Hash, error) { - ignore, err := objects(ignoreStore, ignore, nil, true) - if err != nil { - return nil, err - } - - return objects(s, objs, ignore, false) -} - -func objects( - s storer.EncodedObjectStorer, - objects, - ignore []plumbing.Hash, - allowMissingObjects bool, -) ([]plumbing.Hash, error) { - seen := hashListToSet(ignore) - result := make(map[plumbing.Hash]bool) - visited := make(map[plumbing.Hash]bool) - - walkerFunc := func(h plumbing.Hash) { - if !seen[h] { - result[h] = true - seen[h] = true - } - } - - for _, h := range objects { - if err := processObject(s, h, seen, visited, ignore, walkerFunc); err != nil { - if allowMissingObjects && err == plumbing.ErrObjectNotFound { - continue - } - - return nil, err - } - } - - return hashSetToList(result), nil -} - -// processObject obtains the object using the hash an process it depending of its type -func processObject( - s storer.EncodedObjectStorer, - h plumbing.Hash, - seen map[plumbing.Hash]bool, - visited map[plumbing.Hash]bool, - ignore []plumbing.Hash, - walkerFunc func(h plumbing.Hash), -) error { - if seen[h] { - return nil - } - - o, err := s.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return err - } - - do, err := object.DecodeObject(s, o) - if err != nil { - return err - } - - switch do := do.(type) { - case *object.Commit: - return reachableObjects(do, seen, visited, ignore, walkerFunc) - case *object.Tree: - return iterateCommitTrees(seen, do, walkerFunc) - case *object.Tag: - walkerFunc(do.Hash) - return processObject(s, do.Target, seen, visited, ignore, walkerFunc) - case *object.Blob: - walkerFunc(do.Hash) - default: - return fmt.Errorf("object type not valid: %s. "+ - "Object reference: %s", o.Type(), o.Hash()) - } - - return nil -} - -// reachableObjects returns, using the callback function, all the reachable -// objects from the specified commit. 
To avoid to iterate over seen commits, -// if a commit hash is into the 'seen' set, we will not iterate all his trees -// and blobs objects. -func reachableObjects( - commit *object.Commit, - seen map[plumbing.Hash]bool, - visited map[plumbing.Hash]bool, - ignore []plumbing.Hash, - cb func(h plumbing.Hash), -) error { - i := object.NewCommitPreorderIter(commit, seen, ignore) - pending := make(map[plumbing.Hash]bool) - addPendingParents(pending, visited, commit) - for { - commit, err := i.Next() - if err == io.EOF { - break - } - - if err != nil { - return err - } - - if pending[commit.Hash] { - delete(pending, commit.Hash) - } - - addPendingParents(pending, visited, commit) - - if visited[commit.Hash] && len(pending) == 0 { - break - } - - if seen[commit.Hash] { - continue - } - - cb(commit.Hash) - - tree, err := commit.Tree() - if err != nil { - return err - } - - if err := iterateCommitTrees(seen, tree, cb); err != nil { - return err - } - } - - return nil -} - -func addPendingParents(pending, visited map[plumbing.Hash]bool, commit *object.Commit) { - for _, p := range commit.ParentHashes { - if !visited[p] { - pending[p] = true - } - } -} - -// iterateCommitTrees iterate all reachable trees from the given commit -func iterateCommitTrees( - seen map[plumbing.Hash]bool, - tree *object.Tree, - cb func(h plumbing.Hash), -) error { - if seen[tree.Hash] { - return nil - } - - cb(tree.Hash) - - treeWalker := object.NewTreeWalker(tree, true, seen) - - for { - _, e, err := treeWalker.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if e.Mode == filemode.Submodule { - continue - } - - if seen[e.Hash] { - continue - } - - cb(e.Hash) - } - - return nil -} - -func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash { - var result []plumbing.Hash - for key := range hashes { - result = append(result, key) - } - - return result -} - -func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool { - result := make(map[plumbing.Hash]bool) - for _, h := range hashes { - result[h] = true - } - - return result -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go deleted file mode 100644 index 4d4f179c618..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package storer defines the interfaces to store objects, references, etc. 
-package storer diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go deleted file mode 100644 index 33113949b3a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go +++ /dev/null @@ -1,9 +0,0 @@ -package storer - -import "github.com/go-git/go-git/v5/plumbing/format/index" - -// IndexStorer generic storage of index.Index -type IndexStorer interface { - SetIndex(*index.Index) error - Index() (*index.Index, error) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go deleted file mode 100644 index dfe309db166..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go +++ /dev/null @@ -1,288 +0,0 @@ -package storer - -import ( - "errors" - "io" - "time" - - "github.com/go-git/go-git/v5/plumbing" -) - -var ( - //ErrStop is used to stop a ForEach function in an Iter - ErrStop = errors.New("stop iter") -) - -// EncodedObjectStorer generic storage of objects -type EncodedObjectStorer interface { - // NewEncodedObject returns a new plumbing.EncodedObject, the real type - // of the object can be a custom implementation or the default one, - // plumbing.MemoryObject. - NewEncodedObject() plumbing.EncodedObject - // SetEncodedObject saves an object into the storage, the object should - // be create with the NewEncodedObject, method, and file if the type is - // not supported. - SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) - // EncodedObject gets an object by hash with the given - // plumbing.ObjectType. Implementors should return - // (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with - // both the given hash and object type. - // - // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, - // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must - // be looked up regardless of its type. - EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) - // IterObjects returns a custom EncodedObjectStorer over all the object - // on the storage. - // - // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, - IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error) - // HasEncodedObject returns ErrObjNotFound if the object doesn't - // exist. If the object does exist, it returns nil. - HasEncodedObject(plumbing.Hash) error - // EncodedObjectSize returns the plaintext size of the encoded object. - EncodedObjectSize(plumbing.Hash) (int64, error) -} - -// DeltaObjectStorer is an EncodedObjectStorer that can return delta -// objects. -type DeltaObjectStorer interface { - // DeltaObject is the same as EncodedObject but without resolving deltas. - // Deltas will be returned as plumbing.DeltaObject instances. - DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) -} - -// Transactioner is a optional method for ObjectStorer, it enable transaction -// base write and read operations in the storage -type Transactioner interface { - // Begin starts a transaction. - Begin() Transaction -} - -// LooseObjectStorer is an optional interface for managing "loose" -// objects, i.e. those not in packfiles. -type LooseObjectStorer interface { - // ForEachObjectHash iterates over all the (loose) object hashes - // in the repository without necessarily having to read those objects. 
- // Objects only inside pack files may be omitted. - // If ErrStop is sent the iteration is stop but no error is returned. - ForEachObjectHash(func(plumbing.Hash) error) error - // LooseObjectTime looks up the (m)time associated with the - // loose object (that is not in a pack file). Some - // implementations (e.g. without loose objects) - // always return an error. - LooseObjectTime(plumbing.Hash) (time.Time, error) - // DeleteLooseObject deletes a loose object if it exists. - DeleteLooseObject(plumbing.Hash) error -} - -// PackedObjectStorer is an optional interface for managing objects in -// packfiles. -type PackedObjectStorer interface { - // ObjectPacks returns hashes of object packs if the underlying - // implementation has pack files. - ObjectPacks() ([]plumbing.Hash, error) - // DeleteOldObjectPackAndIndex deletes an object pack and the corresponding index file if they exist. - // Deletion is only performed if the pack is older than the supplied time (or the time is zero). - DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error -} - -// PackfileWriter is a optional method for ObjectStorer, it enable direct write -// of packfile to the storage -type PackfileWriter interface { - // PackfileWriter returns a writer for writing a packfile to the storage - // - // If the Storer not implements PackfileWriter the objects should be written - // using the Set method. - PackfileWriter() (io.WriteCloser, error) -} - -// EncodedObjectIter is a generic closable interface for iterating over objects. -type EncodedObjectIter interface { - Next() (plumbing.EncodedObject, error) - ForEach(func(plumbing.EncodedObject) error) error - Close() -} - -// Transaction is an in-progress storage transaction. A transaction must end -// with a call to Commit or Rollback. -type Transaction interface { - SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) - EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) - Commit() error - Rollback() error -} - -// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a -// series of object hashes and yields their associated objects by retrieving -// each one from object storage. The retrievals are lazy and only occur when the -// iterator moves forward with a call to Next(). -// -// The EncodedObjectLookupIter must be closed with a call to Close() when it is -// no longer needed. -type EncodedObjectLookupIter struct { - storage EncodedObjectStorer - series []plumbing.Hash - t plumbing.ObjectType - pos int -} - -// NewEncodedObjectLookupIter returns an object iterator given an object storage -// and a slice of object hashes. -func NewEncodedObjectLookupIter( - storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter { - return &EncodedObjectLookupIter{ - storage: storage, - series: series, - t: t, - } -} - -// Next returns the next object from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. If the object can't be found in -// the object storage, it will return plumbing.ErrObjectNotFound as an error. -// If the object is retrieved successfully error will be nil. 
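The lookup iterator defined above fetches objects lazily, one hash at a time, as the iterator advances. A sketch of the usual consumption pattern, again against a placeholder repository path:

package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

func main() {
	repo, err := git.PlainOpen(".") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	head, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	// Objects are only read from storage as the iterator moves forward.
	iter := storer.NewEncodedObjectLookupIter(
		repo.Storer, plumbing.CommitObject, []plumbing.Hash{head.Hash()})
	if err := iter.ForEach(func(obj plumbing.EncodedObject) error {
		fmt.Println(obj.Hash(), obj.Type(), obj.Size())
		return storer.ErrStop // stop early; ForEach reports no error for ErrStop
	}); err != nil {
		log.Fatal(err)
	}
}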
-func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) { - if iter.pos >= len(iter.series) { - return nil, io.EOF - } - - hash := iter.series[iter.pos] - obj, err := iter.storage.EncodedObject(iter.t, hash) - if err == nil { - iter.pos++ - } - - return obj, err -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *EncodedObjectLookupIter) Close() { - iter.pos = len(iter.series) -} - -// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a -// series of objects stored in a slice and yields each one in turn when Next() -// is called. -// -// The EncodedObjectSliceIter must be closed with a call to Close() when it is -// no longer needed. -type EncodedObjectSliceIter struct { - series []plumbing.EncodedObject -} - -// NewEncodedObjectSliceIter returns an object iterator for the given slice of -// objects. -func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter { - return &EncodedObjectSliceIter{ - series: series, - } -} - -// Next returns the next object from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. If the object is retrieved -// successfully error will be nil. -func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) { - if len(iter.series) == 0 { - return nil, io.EOF - } - - obj := iter.series[0] - iter.series = iter.series[1:] - - return obj, nil -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *EncodedObjectSliceIter) Close() { - iter.series = []plumbing.EncodedObject{} -} - -// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over several -// EncodedObjectIter, -// -// The MultiObjectIter must be closed with a call to Close() when it is no -// longer needed. -type MultiEncodedObjectIter struct { - iters []EncodedObjectIter -} - -// NewMultiEncodedObjectIter returns an object iterator for the given slice of -// EncodedObjectIters. -func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { - return &MultiEncodedObjectIter{iters: iters} -} - -// Next returns the next object from the iterator, if one iterator reach io.EOF -// is removed and the next one is used. -func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) { - if len(iter.iters) == 0 { - return nil, io.EOF - } - - obj, err := iter.iters[0].Next() - if err == io.EOF { - iter.iters[0].Close() - iter.iters = iter.iters[1:] - return iter.Next() - } - - return obj, err -} - -// ForEach call the cb function for each object contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
-func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return ForEachIterator(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *MultiEncodedObjectIter) Close() { - for _, i := range iter.iters { - i.Close() - } -} - -type bareIterator interface { - Next() (plumbing.EncodedObject, error) - Close() -} - -// ForEachIterator is a helper function to build iterators without need to -// rewrite the same ForEach function each time. -func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error { - defer iter.Close() - for { - obj, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err := cb(obj); err != nil { - if err == ErrStop { - return nil - } - - return err - } - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go deleted file mode 100644 index 1d74ef3c6ac..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go +++ /dev/null @@ -1,240 +0,0 @@ -package storer - -import ( - "errors" - "io" - - "github.com/go-git/go-git/v5/plumbing" -) - -const MaxResolveRecursion = 1024 - -// ErrMaxResolveRecursion is returned by ResolveReference is MaxResolveRecursion -// is exceeded -var ErrMaxResolveRecursion = errors.New("max. recursion level reached") - -// ReferenceStorer is a generic storage of references. -type ReferenceStorer interface { - SetReference(*plumbing.Reference) error - // CheckAndSetReference sets the reference `new`, but if `old` is - // not `nil`, it first checks that the current stored value for - // `old.Name()` matches the given reference value in `old`. If - // not, it returns an error and doesn't update `new`. - CheckAndSetReference(new, old *plumbing.Reference) error - Reference(plumbing.ReferenceName) (*plumbing.Reference, error) - IterReferences() (ReferenceIter, error) - RemoveReference(plumbing.ReferenceName) error - CountLooseRefs() (int, error) - PackRefs() error -} - -// ReferenceIter is a generic closable interface for iterating over references. -type ReferenceIter interface { - Next() (*plumbing.Reference, error) - ForEach(func(*plumbing.Reference) error) error - Close() -} - -type referenceFilteredIter struct { - ff func(r *plumbing.Reference) bool - iter ReferenceIter -} - -// NewReferenceFilteredIter returns a reference iterator for the given reference -// Iterator. This iterator will iterate only references that accomplish the -// provided function. -func NewReferenceFilteredIter( - ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter { - return &referenceFilteredIter{ff, iter} -} - -// Next returns the next reference from the iterator. If the iterator has reached -// the end it will return io.EOF as an error. -func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) { - for { - r, err := iter.iter.Next() - if err != nil { - return nil, err - } - - if iter.ff(r) { - return r, nil - } - - continue - } -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stopped but no error is returned. The iterator is closed. 
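NewReferenceFilteredIter above wraps any ReferenceIter with a predicate. A sketch that keeps only branch references, with the repository path again a placeholder:

package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

func main() {
	repo, err := git.PlainOpen(".") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	all, err := repo.References()
	if err != nil {
		log.Fatal(err)
	}
	branches := storer.NewReferenceFilteredIter(func(r *plumbing.Reference) bool {
		return r.Name().IsBranch() // drop HEAD, tags, remotes, notes
	}, all)
	if err := branches.ForEach(func(r *plumbing.Reference) error {
		fmt.Println(r.Name(), r.Hash())
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}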
-func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error { - defer iter.Close() - for { - r, err := iter.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if err := cb(r); err != nil { - if err == ErrStop { - break - } - - return err - } - } - - return nil -} - -// Close releases any resources used by the iterator. -func (iter *referenceFilteredIter) Close() { - iter.iter.Close() -} - -// ReferenceSliceIter implements ReferenceIter. It iterates over a series of -// references stored in a slice and yields each one in turn when Next() is -// called. -// -// The ReferenceSliceIter must be closed with a call to Close() when it is no -// longer needed. -type ReferenceSliceIter struct { - series []*plumbing.Reference - pos int -} - -// NewReferenceSliceIter returns a reference iterator for the given slice of -// objects. -func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter { - return &ReferenceSliceIter{ - series: series, - } -} - -// Next returns the next reference from the iterator. If the iterator has -// reached the end it will return io.EOF as an error. -func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) { - if iter.pos >= len(iter.series) { - return nil, io.EOF - } - - obj := iter.series[iter.pos] - iter.pos++ - return obj, nil -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. -func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { - return forEachReferenceIter(iter, cb) -} - -type bareReferenceIterator interface { - Next() (*plumbing.Reference, error) - Close() -} - -func forEachReferenceIter(iter bareReferenceIterator, cb func(*plumbing.Reference) error) error { - defer iter.Close() - for { - obj, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - - if err := cb(obj); err != nil { - if err == ErrStop { - return nil - } - - return err - } - } -} - -// Close releases any resources used by the iterator. -func (iter *ReferenceSliceIter) Close() { - iter.pos = len(iter.series) -} - -// MultiReferenceIter implements ReferenceIter. It iterates over several -// ReferenceIter, -// -// The MultiReferenceIter must be closed with a call to Close() when it is no -// longer needed. -type MultiReferenceIter struct { - iters []ReferenceIter -} - -// NewMultiReferenceIter returns an reference iterator for the given slice of -// EncodedObjectIters. -func NewMultiReferenceIter(iters []ReferenceIter) ReferenceIter { - return &MultiReferenceIter{iters: iters} -} - -// Next returns the next reference from the iterator, if one iterator reach -// io.EOF is removed and the next one is used. -func (iter *MultiReferenceIter) Next() (*plumbing.Reference, error) { - if len(iter.iters) == 0 { - return nil, io.EOF - } - - obj, err := iter.iters[0].Next() - if err == io.EOF { - iter.iters[0].Close() - iter.iters = iter.iters[1:] - return iter.Next() - } - - return obj, err -} - -// ForEach call the cb function for each reference contained on this iter until -// an error happens or the end of the iter is reached. If ErrStop is sent -// the iteration is stop but no error is returned. The iterator is closed. 
-func (iter *MultiReferenceIter) ForEach(cb func(*plumbing.Reference) error) error { - return forEachReferenceIter(iter, cb) -} - -// Close releases any resources used by the iterator. -func (iter *MultiReferenceIter) Close() { - for _, i := range iter.iters { - i.Close() - } -} - -// ResolveReference resolves a SymbolicReference to a HashReference. -func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { - r, err := s.Reference(n) - if err != nil || r == nil { - return r, err - } - return resolveReference(s, r, 0) -} - -func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) { - if r.Type() != plumbing.SymbolicReference { - return r, nil - } - - if recursion > MaxResolveRecursion { - return nil, ErrMaxResolveRecursion - } - - t, err := s.Reference(r.Target()) - if err != nil { - return nil, err - } - - recursion++ - return resolveReference(s, t, recursion) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go deleted file mode 100644 index 39ef5ea5c67..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go +++ /dev/null @@ -1,10 +0,0 @@ -package storer - -import "github.com/go-git/go-git/v5/plumbing" - -// ShallowStorer is a storage of references to shallow commits by hash, -// meaning that these commits have missing parents because of a shallow fetch. -type ShallowStorer interface { - SetShallow([]plumbing.Hash) error - Shallow() ([]plumbing.Hash, error) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go deleted file mode 100644 index c7bc65a0c49..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go +++ /dev/null @@ -1,15 +0,0 @@ -package storer - -// Storer is a basic storer for encoded objects and references. -type Storer interface { - EncodedObjectStorer - ReferenceStorer -} - -// Initializer should be implemented by storers that require to perform any -// operation when creating a new repository (i.e. git init). -type Initializer interface { - // Init performs initialization of the storer and returns the error, if - // any. - Init() error -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go deleted file mode 100644 index 20c3d0560e0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go +++ /dev/null @@ -1,83 +0,0 @@ -// Package client contains helper function to deal with the different client -// protocols. -package client - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - gohttp "net/http" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/file" - "github.com/go-git/go-git/v5/plumbing/transport/git" - "github.com/go-git/go-git/v5/plumbing/transport/http" - "github.com/go-git/go-git/v5/plumbing/transport/ssh" -) - -// Protocols are the protocols supported by default. 
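ResolveReference above chases symbolic references (HEAD -> refs/heads/<branch> -> hash) up to MaxResolveRecursion levels. A sketch of resolving HEAD, path again a placeholder:

package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

func main() {
	repo, err := git.PlainOpen(".") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	// repo.Storer satisfies storer.ReferenceStorer.
	ref, err := storer.ResolveReference(repo.Storer, plumbing.HEAD)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ref.Name(), ref.Hash()) // e.g. refs/heads/main <hash>
}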
-var Protocols = map[string]transport.Transport{ - "http": http.DefaultClient, - "https": http.DefaultClient, - "ssh": ssh.DefaultClient, - "git": git.DefaultClient, - "file": file.DefaultClient, -} - -var insecureClient = http.NewClient(&gohttp.Client{ - Transport: &gohttp.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, -}) - -// InstallProtocol adds or modifies an existing protocol. -func InstallProtocol(scheme string, c transport.Transport) { - if c == nil { - delete(Protocols, scheme) - return - } - - Protocols[scheme] = c -} - -// NewClient returns the appropriate client among of the set of known protocols: -// http://, https://, ssh:// and file://. -// See `InstallProtocol` to add or modify protocols. -func NewClient(endpoint *transport.Endpoint) (transport.Transport, error) { - return getTransport(endpoint) -} - -func getTransport(endpoint *transport.Endpoint) (transport.Transport, error) { - if endpoint.Protocol == "https" { - if endpoint.InsecureSkipTLS { - return insecureClient, nil - } - - if len(endpoint.CaBundle) != 0 { - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - rootCAs.AppendCertsFromPEM(endpoint.CaBundle) - return http.NewClient(&gohttp.Client{ - Transport: &gohttp.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - }, - }, - }), nil - } - } - - f, ok := Protocols[endpoint.Protocol] - if !ok { - return nil, fmt.Errorf("unsupported scheme %q", endpoint.Protocol) - } - - if f == nil { - return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Protocol) - } - return f, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go deleted file mode 100644 index a9ee2caee7e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go +++ /dev/null @@ -1,283 +0,0 @@ -// Package transport includes the implementation for different transport -// protocols. -// -// `Client` can be used to fetch and send packfiles to a git server. -// The `client` package provides higher level functions to instantiate the -// appropriate `Client` based on the repository URL. -// -// go-git supports HTTP and SSH (see `Protocols`), but you can also install -// your own protocols (see the `client` package). -// -// Each protocol has its own implementation of `Client`, but you should -// generally not use them directly, use `client.NewClient` instead. -package transport - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "net/url" - "strconv" - "strings" - - giturl "github.com/go-git/go-git/v5/internal/url" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" -) - -var ( - ErrRepositoryNotFound = errors.New("repository not found") - ErrEmptyRemoteRepository = errors.New("remote repository is empty") - ErrAuthenticationRequired = errors.New("authentication required") - ErrAuthorizationFailed = errors.New("authorization failed") - ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given") - ErrInvalidAuthMethod = errors.New("invalid auth method") - ErrAlreadyConnected = errors.New("session already established") -) - -const ( - UploadPackServiceName = "git-upload-pack" - ReceivePackServiceName = "git-receive-pack" -) - -// Transport can initiate git-upload-pack and git-receive-pack processes. 
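InstallProtocol above swaps the transport used for a URL scheme process-wide. A sketch replacing the default HTTPS transport with one backed by a net/http client that enforces a request timeout (the 15s value is arbitrary):

package main

import (
	"net/http"
	"time"

	"github.com/go-git/go-git/v5/plumbing/transport/client"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	timeoutClient := &http.Client{Timeout: 15 * time.Second}
	client.InstallProtocol("https", githttp.NewClient(timeoutClient))
	// Every later client.NewClient call for an https endpoint now goes
	// through timeoutClient.
}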
-// It is implemented both by the client and the server, making this a RPC. -type Transport interface { - // NewUploadPackSession starts a git-upload-pack session for an endpoint. - NewUploadPackSession(*Endpoint, AuthMethod) (UploadPackSession, error) - // NewReceivePackSession starts a git-receive-pack session for an endpoint. - NewReceivePackSession(*Endpoint, AuthMethod) (ReceivePackSession, error) -} - -type Session interface { - // AdvertisedReferences retrieves the advertised references for a - // repository. - // If the repository does not exist, returns ErrRepositoryNotFound. - // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. - AdvertisedReferences() (*packp.AdvRefs, error) - // AdvertisedReferencesContext retrieves the advertised references for a - // repository. - // If the repository does not exist, returns ErrRepositoryNotFound. - // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. - AdvertisedReferencesContext(context.Context) (*packp.AdvRefs, error) - io.Closer -} - -type AuthMethod interface { - fmt.Stringer - Name() string -} - -// UploadPackSession represents a git-upload-pack session. -// A git-upload-pack session has two steps: reference discovery -// (AdvertisedReferences) and uploading pack (UploadPack). -type UploadPackSession interface { - Session - // UploadPack takes a git-upload-pack request and returns a response, - // including a packfile. Don't be confused by terminology, the client - // side of a git-upload-pack is called git-fetch-pack, although here - // the same interface is used to make it RPC-like. - UploadPack(context.Context, *packp.UploadPackRequest) (*packp.UploadPackResponse, error) -} - -// ReceivePackSession represents a git-receive-pack session. -// A git-receive-pack session has two steps: reference discovery -// (AdvertisedReferences) and receiving pack (ReceivePack). -// In that order. -type ReceivePackSession interface { - Session - // ReceivePack sends an update references request and a packfile - // reader and returns a ReportStatus and error. Don't be confused by - // terminology, the client side of a git-receive-pack is called - // git-send-pack, although here the same interface is used to make it - // RPC-like. - ReceivePack(context.Context, *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) -} - -// Endpoint represents a Git URL in any supported protocol. -type Endpoint struct { - // Protocol is the protocol of the endpoint (e.g. git, https, file). - Protocol string - // User is the user. - User string - // Password is the password. - Password string - // Host is the host. - Host string - // Port is the port to connect, if 0 the default port for the given protocol - // wil be used. - Port int - // Path is the repository path. - Path string - // InsecureSkipTLS skips ssl verify if protocal is https - InsecureSkipTLS bool - // CaBundle specify additional ca bundle with system cert pool - CaBundle []byte -} - -var defaultPorts = map[string]int{ - "http": 80, - "https": 443, - "git": 9418, - "ssh": 22, -} - -// String returns a string representation of the Git URL. 
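The Transport and Session interfaces above are enough to run the reference-discovery half of a fetch by hand. A sketch that opens an upload-pack session against a placeholder public URL and lists the advertised references (field names as in go-git v5's packp.AdvRefs; anonymous HTTPS, hence the nil AuthMethod):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/go-git/go-git/v5/plumbing/transport"
	"github.com/go-git/go-git/v5/plumbing/transport/client"
)

func main() {
	ep, err := transport.NewEndpoint("https://github.com/go-git/go-git.git") // placeholder
	if err != nil {
		log.Fatal(err)
	}
	cl, err := client.NewClient(ep)
	if err != nil {
		log.Fatal(err)
	}
	sess, err := cl.NewUploadPackSession(ep, nil) // nil: anonymous access
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()
	advRefs, err := sess.AdvertisedReferencesContext(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for name, hash := range advRefs.References {
		fmt.Println(hash, name)
	}
}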
-func (u *Endpoint) String() string { - var buf bytes.Buffer - if u.Protocol != "" { - buf.WriteString(u.Protocol) - buf.WriteByte(':') - } - - if u.Protocol != "" || u.Host != "" || u.User != "" || u.Password != "" { - buf.WriteString("//") - - if u.User != "" || u.Password != "" { - buf.WriteString(url.PathEscape(u.User)) - if u.Password != "" { - buf.WriteByte(':') - buf.WriteString(url.PathEscape(u.Password)) - } - - buf.WriteByte('@') - } - - if u.Host != "" { - buf.WriteString(u.Host) - - if u.Port != 0 { - port, ok := defaultPorts[strings.ToLower(u.Protocol)] - if !ok || ok && port != u.Port { - fmt.Fprintf(&buf, ":%d", u.Port) - } - } - } - } - - if u.Path != "" && u.Path[0] != '/' && u.Host != "" { - buf.WriteByte('/') - } - - buf.WriteString(u.Path) - return buf.String() -} - -func NewEndpoint(endpoint string) (*Endpoint, error) { - if e, ok := parseSCPLike(endpoint); ok { - return e, nil - } - - if e, ok := parseFile(endpoint); ok { - return e, nil - } - - return parseURL(endpoint) -} - -func parseURL(endpoint string) (*Endpoint, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, err - } - - if !u.IsAbs() { - return nil, plumbing.NewPermanentError(fmt.Errorf( - "invalid endpoint: %s", endpoint, - )) - } - - var user, pass string - if u.User != nil { - user = u.User.Username() - pass, _ = u.User.Password() - } - - return &Endpoint{ - Protocol: u.Scheme, - User: user, - Password: pass, - Host: u.Hostname(), - Port: getPort(u), - Path: getPath(u), - }, nil -} - -func getPort(u *url.URL) int { - p := u.Port() - if p == "" { - return 0 - } - - i, err := strconv.Atoi(p) - if err != nil { - return 0 - } - - return i -} - -func getPath(u *url.URL) string { - var res string = u.Path - if u.RawQuery != "" { - res += "?" + u.RawQuery - } - - if u.Fragment != "" { - res += "#" + u.Fragment - } - - return res -} - -func parseSCPLike(endpoint string) (*Endpoint, bool) { - if giturl.MatchesScheme(endpoint) || !giturl.MatchesScpLike(endpoint) { - return nil, false - } - - user, host, portStr, path := giturl.FindScpLikeComponents(endpoint) - port, err := strconv.Atoi(portStr) - if err != nil { - port = 22 - } - - return &Endpoint{ - Protocol: "ssh", - User: user, - Host: host, - Port: port, - Path: path, - }, true -} - -func parseFile(endpoint string) (*Endpoint, bool) { - if giturl.MatchesScheme(endpoint) { - return nil, false - } - - path := endpoint - return &Endpoint{ - Protocol: "file", - Path: path, - }, true -} - -// UnsupportedCapabilities are the capabilities not supported by any client -// implementation -var UnsupportedCapabilities = []capability.Capability{ - capability.MultiACK, - capability.MultiACKDetailed, - capability.ThinPack, -} - -// FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities -// from a capability.List, the intended usage is on the client implementation -// to filter the capabilities from an AdvRefs message. -func FilterUnsupportedCapabilities(list *capability.List) { - for _, c := range UnsupportedCapabilities { - list.Delete(c) - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go deleted file mode 100644 index 6f0a38012b5..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go +++ /dev/null @@ -1,157 +0,0 @@ -// Package file implements the file transport protocol. 
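NewEndpoint above accepts three spellings: a full URL, an SCP-like string, and a bare filesystem path. A sketch exercising all three (hosts and paths are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/go-git/go-git/v5/plumbing/transport"
)

func main() {
	for _, raw := range []string{
		"https://example.com/org/repo.git", // parseURL
		"git@example.com:org/repo.git",     // parseSCPLike: ssh, port 22
		"/var/repos/repo.git",              // parseFile: file protocol
	} {
		ep, err := transport.NewEndpoint(raw)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-36s -> protocol=%s host=%s port=%d path=%s\n",
			raw, ep.Protocol, ep.Host, ep.Port, ep.Path)
	}
}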
-package file - -import ( - "bufio" - "errors" - "io" - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/utils/ioutil" - "golang.org/x/sys/execabs" -) - -// DefaultClient is the default local client. -var DefaultClient = NewClient( - transport.UploadPackServiceName, - transport.ReceivePackServiceName, -) - -type runner struct { - UploadPackBin string - ReceivePackBin string -} - -// NewClient returns a new local client using the given git-upload-pack and -// git-receive-pack binaries. -func NewClient(uploadPackBin, receivePackBin string) transport.Transport { - return common.NewClient(&runner{ - UploadPackBin: uploadPackBin, - ReceivePackBin: receivePackBin, - }) -} - -func prefixExecPath(cmd string) (string, error) { - // Use `git --exec-path` to find the exec path. - execCmd := execabs.Command("git", "--exec-path") - - stdout, err := execCmd.StdoutPipe() - if err != nil { - return "", err - } - stdoutBuf := bufio.NewReader(stdout) - - err = execCmd.Start() - if err != nil { - return "", err - } - - execPathBytes, isPrefix, err := stdoutBuf.ReadLine() - if err != nil { - return "", err - } - if isPrefix { - return "", errors.New("couldn't read exec-path line all at once") - } - - err = execCmd.Wait() - if err != nil { - return "", err - } - execPath := string(execPathBytes) - execPath = strings.TrimSpace(execPath) - cmd = filepath.Join(execPath, cmd) - - // Make sure it actually exists. - _, err = execabs.LookPath(cmd) - if err != nil { - return "", err - } - return cmd, nil -} - -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod, -) (common.Command, error) { - - switch cmd { - case transport.UploadPackServiceName: - cmd = r.UploadPackBin - case transport.ReceivePackServiceName: - cmd = r.ReceivePackBin - } - - _, err := execabs.LookPath(cmd) - if err != nil { - if e, ok := err.(*execabs.Error); ok && e.Err == execabs.ErrNotFound { - cmd, err = prefixExecPath(cmd) - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - - return &command{cmd: execabs.Command(cmd, ep.Path)}, nil -} - -type command struct { - cmd *execabs.Cmd - stderrCloser io.Closer - closed bool -} - -func (c *command) Start() error { - return c.cmd.Start() -} - -func (c *command) StderrPipe() (io.Reader, error) { - // Pipe returned by Command.StderrPipe has a race with Read + Command.Wait. - // We use an io.Pipe and close it after the command finishes. - r, w := ioutil.Pipe() - c.cmd.Stderr = w - c.stderrCloser = r - return r, nil -} - -func (c *command) StdinPipe() (io.WriteCloser, error) { - return c.cmd.StdinPipe() -} - -func (c *command) StdoutPipe() (io.Reader, error) { - return c.cmd.StdoutPipe() -} - -func (c *command) Kill() error { - c.cmd.Process.Kill() - return c.Close() -} - -// Close waits for the command to exit. -func (c *command) Close() error { - if c.closed { - return nil - } - - defer func() { - c.closed = true - _ = c.stderrCloser.Close() - - }() - - err := c.cmd.Wait() - if _, ok := err.(*os.PathError); ok { - return nil - } - - // When a repository does not exist, the command exits with code 128. 
- if _, ok := err.(*execabs.ExitError); ok { - return nil - } - - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go deleted file mode 100644 index b45d7a71c2f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go +++ /dev/null @@ -1,53 +0,0 @@ -package file - -import ( - "fmt" - "os" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/plumbing/transport/server" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ServeUploadPack serves a git-upload-pack request using standard output, input -// and error. This is meant to be used when implementing a git-upload-pack -// command. -func ServeUploadPack(path string) error { - ep, err := transport.NewEndpoint(path) - if err != nil { - return err - } - - // TODO: define and implement a server-side AuthMethod - s, err := server.DefaultServer.NewUploadPackSession(ep, nil) - if err != nil { - return fmt.Errorf("error creating session: %s", err) - } - - return common.ServeUploadPack(srvCmd, s) -} - -// ServeReceivePack serves a git-receive-pack request using standard output, -// input and error. This is meant to be used when implementing a -// git-receive-pack command. -func ServeReceivePack(path string) error { - ep, err := transport.NewEndpoint(path) - if err != nil { - return err - } - - // TODO: define and implement a server-side AuthMethod - s, err := server.DefaultServer.NewReceivePackSession(ep, nil) - if err != nil { - return fmt.Errorf("error creating session: %s", err) - } - - return common.ServeReceivePack(srvCmd, s) -} - -var srvCmd = common.ServerCommand{ - Stdin: os.Stdin, - Stdout: ioutil.WriteNopCloser(os.Stdout), - Stderr: os.Stderr, -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go deleted file mode 100644 index 306aae261f0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go +++ /dev/null @@ -1,109 +0,0 @@ -// Package git implements the git transport protocol. -package git - -import ( - "fmt" - "io" - "net" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// DefaultClient is the default git client. 
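ServeUploadPack and ServeReceivePack above are meant to back replacements for the stock git-upload-pack / git-receive-pack binaries: take the repository path from argv and speak the pack protocol on stdio. A sketch of such a binary (a sketch only; a real replacement would also need the server-side AuthMethod the TODO mentions):

package main

import (
	"log"
	"os"

	"github.com/go-git/go-git/v5/plumbing/transport/file"
)

func main() {
	if len(os.Args) != 2 {
		log.Fatal("usage: git-upload-pack <repository-path>")
	}
	// Speaks git-upload-pack on stdin/stdout for the given repository.
	if err := file.ServeUploadPack(os.Args[1]); err != nil {
		log.Fatal(err)
	}
}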
-var DefaultClient = common.NewClient(&runner{}) - -const DefaultPort = 9418 - -type runner struct{} - -// Command returns a new Command for the given cmd in the given Endpoint -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { - // auth not allowed since git protocol doesn't support authentication - if auth != nil { - return nil, transport.ErrInvalidAuthMethod - } - c := &command{command: cmd, endpoint: ep} - if err := c.connect(); err != nil { - return nil, err - } - return c, nil -} - -type command struct { - conn net.Conn - connected bool - command string - endpoint *transport.Endpoint -} - -// Start executes the command sending the required message to the TCP connection -func (c *command) Start() error { - cmd := endpointToCommand(c.command, c.endpoint) - - e := pktline.NewEncoder(c.conn) - return e.Encode([]byte(cmd)) -} - -func (c *command) connect() error { - if c.connected { - return transport.ErrAlreadyConnected - } - - var err error - c.conn, err = net.Dial("tcp", c.getHostWithPort()) - if err != nil { - return err - } - - c.connected = true - return nil -} - -func (c *command) getHostWithPort() string { - host := c.endpoint.Host - port := c.endpoint.Port - if port <= 0 { - port = DefaultPort - } - - return fmt.Sprintf("%s:%d", host, port) -} - -// StderrPipe git protocol doesn't have any dedicated error channel -func (c *command) StderrPipe() (io.Reader, error) { - return nil, nil -} - -// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent -// call to the Close function from the connection, a command execution in git -// protocol can't be closed or killed -func (c *command) StdinPipe() (io.WriteCloser, error) { - return ioutil.WriteNopCloser(c.conn), nil -} - -// StdoutPipe return the underlying connection as Reader -func (c *command) StdoutPipe() (io.Reader, error) { - return c.conn, nil -} - -func endpointToCommand(cmd string, ep *transport.Endpoint) string { - host := ep.Host - if ep.Port != DefaultPort { - host = fmt.Sprintf("%s:%d", ep.Host, ep.Port) - } - - return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, host, 0) -} - -// Close closes the TCP connection and connection. -func (c *command) Close() error { - if !c.connected { - return nil - } - - c.connected = false - return c.conn.Close() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go deleted file mode 100644 index d57c0feef59..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ /dev/null @@ -1,282 +0,0 @@ -// Package http implements the HTTP transport protocol. 
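The git:// transport above frames its single request as one pkt-line of the form "<service> <path>\0host=<host>\0" (see endpointToCommand). A sketch of that framing with placeholder host and path:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	cmd := fmt.Sprintf("git-upload-pack %s\x00host=%s\x00",
		"/project.git", "example.com") // placeholders
	if err := e.Encode([]byte(cmd)); err != nil {
		log.Fatal(err)
	}
	// Four hex digits of length, then the payload.
	fmt.Printf("%q\n", buf.String())
}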
-package http - -import ( - "bytes" - "context" - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// it requires a bytes.Buffer, because we need to know the length -func applyHeadersToRequest(req *http.Request, content *bytes.Buffer, host string, requestType string) { - req.Header.Add("User-Agent", "git/1.0") - req.Header.Add("Host", host) // host:port - - if content == nil { - req.Header.Add("Accept", "*/*") - return - } - - req.Header.Add("Accept", fmt.Sprintf("application/x-%s-result", requestType)) - req.Header.Add("Content-Type", fmt.Sprintf("application/x-%s-request", requestType)) - req.Header.Add("Content-Length", strconv.Itoa(content.Len())) -} - -const infoRefsPath = "/info/refs" - -func advertisedReferences(ctx context.Context, s *session, serviceName string) (ref *packp.AdvRefs, err error) { - url := fmt.Sprintf( - "%s%s?service=%s", - s.endpoint.String(), infoRefsPath, serviceName, - ) - - req, err := http.NewRequest(http.MethodGet, url, nil) - if err != nil { - return nil, err - } - - s.ApplyAuthToRequest(req) - applyHeadersToRequest(req, nil, s.endpoint.Host, serviceName) - res, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, err - } - - s.ModifyEndpointIfRedirect(res) - defer ioutil.CheckClose(res.Body, &err) - - if err = NewErr(res); err != nil { - return nil, err - } - - ar := packp.NewAdvRefs() - if err = ar.Decode(res.Body); err != nil { - if err == packp.ErrEmptyAdvRefs { - err = transport.ErrEmptyRemoteRepository - } - - return nil, err - } - - transport.FilterUnsupportedCapabilities(ar.Capabilities) - s.advRefs = ar - - return ar, nil -} - -type client struct { - c *http.Client -} - -// DefaultClient is the default HTTP client, which uses `http.DefaultClient`. -var DefaultClient = NewClient(nil) - -// NewClient creates a new client with a custom net/http client. -// See `InstallProtocol` to install and override default http client. -// Unless a properly initialized client is given, it will fall back into -// `http.DefaultClient`. -// -// Note that for HTTP client cannot distinguish between private repositories and -// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` -// for both. 
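advertisedReferences above implements the discovery half of the smart HTTP protocol: a GET on <repo>/info/refs?service=git-upload-pack whose pkt-line body starts with "# service=git-upload-pack". A sketch with plain net/http against a placeholder repository URL:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	base := "https://github.com/go-git/go-git.git" // placeholder
	res, err := http.Get(base + "/info/refs?service=git-upload-pack")
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	// Smart servers answer with application/x-git-upload-pack-advertisement.
	fmt.Println(res.Status, res.Header.Get("Content-Type"))
	body, err := io.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%.120s\n", body)
}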
-func NewClient(c *http.Client) transport.Transport { - if c == nil { - return &client{http.DefaultClient} - } - - return &client{ - c: c, - } -} - -func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.UploadPackSession, error) { - - return newUploadPackSession(c.c, ep, auth) -} - -func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.ReceivePackSession, error) { - - return newReceivePackSession(c.c, ep, auth) -} - -type session struct { - auth AuthMethod - client *http.Client - endpoint *transport.Endpoint - advRefs *packp.AdvRefs -} - -func newSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { - s := &session{ - auth: basicAuthFromEndpoint(ep), - client: c, - endpoint: ep, - } - if auth != nil { - a, ok := auth.(AuthMethod) - if !ok { - return nil, transport.ErrInvalidAuthMethod - } - - s.auth = a - } - - return s, nil -} - -func (s *session) ApplyAuthToRequest(req *http.Request) { - if s.auth == nil { - return - } - - s.auth.SetAuth(req) -} - -func (s *session) ModifyEndpointIfRedirect(res *http.Response) { - if res.Request == nil { - return - } - - r := res.Request - if !strings.HasSuffix(r.URL.Path, infoRefsPath) { - return - } - - h, p, err := net.SplitHostPort(r.URL.Host) - if err != nil { - h = r.URL.Host - } - if p != "" { - port, err := strconv.Atoi(p) - if err == nil { - s.endpoint.Port = port - } - } - s.endpoint.Host = h - - s.endpoint.Protocol = r.URL.Scheme - s.endpoint.Path = r.URL.Path[:len(r.URL.Path)-len(infoRefsPath)] -} - -func (*session) Close() error { - return nil -} - -// AuthMethod is concrete implementation of common.AuthMethod for HTTP services -type AuthMethod interface { - transport.AuthMethod - SetAuth(r *http.Request) -} - -func basicAuthFromEndpoint(ep *transport.Endpoint) *BasicAuth { - u := ep.User - if u == "" { - return nil - } - - return &BasicAuth{u, ep.Password} -} - -// BasicAuth represent a HTTP basic auth -type BasicAuth struct { - Username, Password string -} - -func (a *BasicAuth) SetAuth(r *http.Request) { - if a == nil { - return - } - - r.SetBasicAuth(a.Username, a.Password) -} - -// Name is name of the auth -func (a *BasicAuth) Name() string { - return "http-basic-auth" -} - -func (a *BasicAuth) String() string { - masked := "*******" - if a.Password == "" { - masked = "" - } - - return fmt.Sprintf("%s - %s:%s", a.Name(), a.Username, masked) -} - -// TokenAuth implements an http.AuthMethod that can be used with http transport -// to authenticate with HTTP token authentication (also known as bearer -// authentication). -// -// IMPORTANT: If you are looking to use OAuth tokens with popular servers (e.g. -// GitHub, Bitbucket, GitLab) you should use BasicAuth instead. These servers -// use basic HTTP authentication, with the OAuth token as user or password. -// Check the documentation of your git server for details. 
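As the note above stresses, OAuth tokens for GitHub/GitLab-style servers go through BasicAuth rather than bearer authentication. A sketch with placeholder credentials (GitHub, for instance, accepts the token as the password with any non-empty username):

package main

import (
	"fmt"

	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

func main() {
	auth := &githttp.BasicAuth{
		Username: "x-access-token", // placeholder; any non-empty value
		Password: "ghp_xxxxxxxx",   // placeholder OAuth token
	}
	fmt.Println(auth) // String() masks the password in its output
}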
-type TokenAuth struct { - Token string -} - -func (a *TokenAuth) SetAuth(r *http.Request) { - if a == nil { - return - } - r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", a.Token)) -} - -// Name is name of the auth -func (a *TokenAuth) Name() string { - return "http-token-auth" -} - -func (a *TokenAuth) String() string { - masked := "*******" - if a.Token == "" { - masked = "" - } - return fmt.Sprintf("%s - %s", a.Name(), masked) -} - -// Err is a dedicated error to return errors based on status code -type Err struct { - Response *http.Response -} - -// NewErr returns a new Err based on a http response -func NewErr(r *http.Response) error { - if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { - return nil - } - - switch r.StatusCode { - case http.StatusUnauthorized: - return transport.ErrAuthenticationRequired - case http.StatusForbidden: - return transport.ErrAuthorizationFailed - case http.StatusNotFound: - return transport.ErrRepositoryNotFound - } - - return plumbing.NewUnexpectedError(&Err{r}) -} - -// StatusCode returns the status code of the response -func (e *Err) StatusCode() int { - return e.Response.StatusCode -} - -func (e *Err) Error() string { - return fmt.Sprintf("unexpected requesting %q status code: %d", - e.Response.Request.URL, e.Response.StatusCode, - ) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go deleted file mode 100644 index 4d14ff21ea4..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go +++ /dev/null @@ -1,110 +0,0 @@ -package http - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type rpSession struct { - *session -} - -func newReceivePackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - s, err := newSession(c, ep, auth) - return &rpSession{s}, err -} - -func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(context.TODO(), s.session, transport.ReceivePackServiceName) -} - -func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { - return advertisedReferences(ctx, s.session, transport.ReceivePackServiceName) -} - -func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) ( - *packp.ReportStatus, error) { - url := fmt.Sprintf( - "%s/%s", - s.endpoint.String(), transport.ReceivePackServiceName, - ) - - buf := bytes.NewBuffer(nil) - if err := req.Encode(buf); err != nil { - return nil, err - } - - res, err := s.doRequest(ctx, http.MethodPost, url, buf) - if err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(res.Body) - if err == ioutil.ErrEmptyReader { - return nil, nil - } - - if err != nil { - return nil, err - } - - var d *sideband.Demuxer - if req.Capabilities.Supports(capability.Sideband64k) { - d = sideband.NewDemuxer(sideband.Sideband64k, r) - } else if req.Capabilities.Supports(capability.Sideband) { - d = sideband.NewDemuxer(sideband.Sideband, r) - } - if d != nil { - d.Progress = req.Progress - r = d - 
} - - rc := ioutil.NewReadCloser(r, res.Body) - - report := packp.NewReportStatus() - if err := report.Decode(rc); err != nil { - return nil, err - } - - return report, report.Error() -} - -func (s *rpSession) doRequest( - ctx context.Context, method, url string, content *bytes.Buffer, -) (*http.Response, error) { - - var body io.Reader - if content != nil { - body = content - } - - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, plumbing.NewPermanentError(err) - } - - applyHeadersToRequest(req, content, s.endpoint.Host, transport.ReceivePackServiceName) - s.ApplyAuthToRequest(req) - - res, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, plumbing.NewUnexpectedError(err) - } - - if err := NewErr(res); err != nil { - _ = res.Body.Close() - return nil, err - } - - return res, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go deleted file mode 100644 index e735b3d7c98..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go +++ /dev/null @@ -1,127 +0,0 @@ -package http - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type upSession struct { - *session -} - -func newUploadPackSession(c *http.Client, ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { - s, err := newSession(c, ep, auth) - return &upSession{s}, err -} - -func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return advertisedReferences(context.TODO(), s.session, transport.UploadPackServiceName) -} - -func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { - return advertisedReferences(ctx, s.session, transport.UploadPackServiceName) -} - -func (s *upSession) UploadPack( - ctx context.Context, req *packp.UploadPackRequest, -) (*packp.UploadPackResponse, error) { - - if req.IsEmpty() { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - url := fmt.Sprintf( - "%s/%s", - s.endpoint.String(), transport.UploadPackServiceName, - ) - - content, err := uploadPackRequestToReader(req) - if err != nil { - return nil, err - } - - res, err := s.doRequest(ctx, http.MethodPost, url, content) - if err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(res.Body) - if err != nil { - if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { - return nil, transport.ErrEmptyUploadPackRequest - } - - return nil, err - } - - rc := ioutil.NewReadCloser(r, res.Body) - return common.DecodeUploadPackResponse(rc, req) -} - -// Close does nothing. 
-func (s *upSession) Close() error { - return nil -} - -func (s *upSession) doRequest( - ctx context.Context, method, url string, content *bytes.Buffer, -) (*http.Response, error) { - - var body io.Reader - if content != nil { - body = content - } - - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, plumbing.NewPermanentError(err) - } - - applyHeadersToRequest(req, content, s.endpoint.Host, transport.UploadPackServiceName) - s.ApplyAuthToRequest(req) - - res, err := s.client.Do(req.WithContext(ctx)) - if err != nil { - return nil, plumbing.NewUnexpectedError(err) - } - - if err := NewErr(res); err != nil { - _ = res.Body.Close() - return nil, err - } - - return res, nil -} - -func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { - buf := bytes.NewBuffer(nil) - e := pktline.NewEncoder(buf) - - if err := req.UploadRequest.Encode(buf); err != nil { - return nil, fmt.Errorf("sending upload-req message: %s", err) - } - - if err := req.UploadHaves.Encode(buf, false); err != nil { - return nil, fmt.Errorf("sending haves message: %s", err) - } - - if err := e.EncodeString("done\n"); err != nil { - return nil, err - } - - return buf, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go deleted file mode 100644 index fdb148f5990..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go +++ /dev/null @@ -1,478 +0,0 @@ -// Package common implements the git pack protocol with a pluggable transport. -// This is a low-level package to implement new transports. Use a concrete -// implementation instead (e.g. http, file, ssh). -// -// A simple example of usage can be found in the file package. -package common - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "strings" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/pktline" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -const ( - readErrorSecondsTimeout = 10 -) - -var ( - ErrTimeoutExceeded = errors.New("timeout exceeded") -) - -// Commander creates Command instances. This is the main entry point for -// transport implementations. -type Commander interface { - // Command creates a new Command for the given git command and - // endpoint. cmd can be git-upload-pack or git-receive-pack. An - // error should be returned if the endpoint is not supported or the - // command cannot be created (e.g. binary does not exist, connection - // cannot be established). - Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (Command, error) -} - -// Command is used for a single command execution. -// This interface is modeled after exec.Cmd and ssh.Session in the standard -// library. -type Command interface { - // StderrPipe returns a pipe that will be connected to the command's - // standard error when the command starts. It should not be called after - // Start. - StderrPipe() (io.Reader, error) - // StdinPipe returns a pipe that will be connected to the command's - // standard input when the command starts. It should not be called after - // Start. The pipe should be closed when no more input is expected. 
- StdinPipe() (io.WriteCloser, error) - // StdoutPipe returns a pipe that will be connected to the command's - // standard output when the command starts. It should not be called after - // Start. - StdoutPipe() (io.Reader, error) - // Start starts the specified command. It does not wait for it to - // complete. - Start() error - // Close closes the command and releases any resources used by it. It - // will block until the command exits. - Close() error -} - -// CommandKiller expands the Command interface, enabling it for being killed. -type CommandKiller interface { - // Kill and close the session whatever the state it is. It will block until - // the command is terminated. - Kill() error -} - -type client struct { - cmdr Commander -} - -// NewClient creates a new client using the given Commander. -func NewClient(runner Commander) transport.Transport { - return &client{runner} -} - -// NewUploadPackSession creates a new UploadPackSession. -func (c *client) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.UploadPackSession, error) { - - return c.newSession(transport.UploadPackServiceName, ep, auth) -} - -// NewReceivePackSession creates a new ReceivePackSession. -func (c *client) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) ( - transport.ReceivePackSession, error) { - - return c.newSession(transport.ReceivePackServiceName, ep, auth) -} - -type session struct { - Stdin io.WriteCloser - Stdout io.Reader - Command Command - - isReceivePack bool - advRefs *packp.AdvRefs - packRun bool - finished bool - firstErrLine chan string -} - -func (c *client) newSession(s string, ep *transport.Endpoint, auth transport.AuthMethod) (*session, error) { - cmd, err := c.cmdr.Command(s, ep, auth) - if err != nil { - return nil, err - } - - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - - if err := cmd.Start(); err != nil { - return nil, err - } - - return &session{ - Stdin: stdin, - Stdout: stdout, - Command: cmd, - firstErrLine: c.listenFirstError(stderr), - isReceivePack: s == transport.ReceivePackServiceName, - }, nil -} - -func (c *client) listenFirstError(r io.Reader) chan string { - if r == nil { - return nil - } - - errLine := make(chan string, 1) - go func() { - s := bufio.NewScanner(r) - if s.Scan() { - errLine <- s.Text() - } else { - close(errLine) - } - - _, _ = io.Copy(stdioutil.Discard, r) - }() - - return errLine -} - -func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { - return s.AdvertisedReferencesContext(context.TODO()) -} - -// AdvertisedReferences retrieves the advertised references from the server. -func (s *session) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { - if s.advRefs != nil { - return s.advRefs, nil - } - - ar := packp.NewAdvRefs() - if err := ar.Decode(s.StdoutContext(ctx)); err != nil { - if err := s.handleAdvRefDecodeError(err); err != nil { - return nil, err - } - } - - // Some servers like jGit, announce capabilities instead of returning an - // packp message with a flush. This verifies that we received a empty - // adv-refs, even it contains capabilities. 
- if !s.isReceivePack && ar.IsEmpty() { - return nil, transport.ErrEmptyRemoteRepository - } - - transport.FilterUnsupportedCapabilities(ar.Capabilities) - s.advRefs = ar - return ar, nil -} - -func (s *session) handleAdvRefDecodeError(err error) error { - // If repository is not found, we get empty stdout and server writes an - // error to stderr. - if err == packp.ErrEmptyInput { - s.finished = true - if err := s.checkNotFoundError(); err != nil { - return err - } - - return io.ErrUnexpectedEOF - } - - // For empty (but existing) repositories, we get empty advertised-references - // message. But valid. That is, it includes at least a flush. - if err == packp.ErrEmptyAdvRefs { - // Empty repositories are valid for git-receive-pack. - if s.isReceivePack { - return nil - } - - if err := s.finish(); err != nil { - return err - } - - return transport.ErrEmptyRemoteRepository - } - - // Some server sends the errors as normal content (git protocol), so when - // we try to decode it fails, we need to check the content of it, to detect - // not found errors - if uerr, ok := err.(*packp.ErrUnexpectedData); ok { - if isRepoNotFoundError(string(uerr.Data)) { - return transport.ErrRepositoryNotFound - } - } - - return err -} - -// UploadPack performs a request to the server to fetch a packfile. A reader is -// returned with the packfile content. The reader must be closed after reading. -func (s *session) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() && len(req.Shallows) == 0 { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - if _, err := s.AdvertisedReferencesContext(ctx); err != nil { - return nil, err - } - - s.packRun = true - - in := s.StdinContext(ctx) - out := s.StdoutContext(ctx) - - if err := uploadPack(in, out, req); err != nil { - return nil, err - } - - r, err := ioutil.NonEmptyReader(out) - if err == ioutil.ErrEmptyReader { - if c, ok := s.Stdout.(io.Closer); ok { - _ = c.Close() - } - - return nil, transport.ErrEmptyUploadPackRequest - } - - if err != nil { - return nil, err - } - - rc := ioutil.NewReadCloser(r, s) - return DecodeUploadPackResponse(rc, req) -} - -func (s *session) StdinContext(ctx context.Context) io.WriteCloser { - return ioutil.NewWriteCloserOnError( - ioutil.NewContextWriteCloser(ctx, s.Stdin), - s.onError, - ) -} - -func (s *session) StdoutContext(ctx context.Context) io.Reader { - return ioutil.NewReaderOnError( - ioutil.NewContextReader(ctx, s.Stdout), - s.onError, - ) -} - -func (s *session) onError(err error) { - if k, ok := s.Command.(CommandKiller); ok { - _ = k.Kill() - } - - _ = s.Close() -} - -func (s *session) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { - if _, err := s.AdvertisedReferences(); err != nil { - return nil, err - } - - s.packRun = true - - w := s.StdinContext(ctx) - if err := req.Encode(w); err != nil { - return nil, err - } - - if err := w.Close(); err != nil { - return nil, err - } - - if !req.Capabilities.Supports(capability.ReportStatus) { - // If we don't have report-status, we can only - // check return value error. 
- return nil, s.Command.Close() - } - - r := s.StdoutContext(ctx) - - var d *sideband.Demuxer - if req.Capabilities.Supports(capability.Sideband64k) { - d = sideband.NewDemuxer(sideband.Sideband64k, r) - } else if req.Capabilities.Supports(capability.Sideband) { - d = sideband.NewDemuxer(sideband.Sideband, r) - } - if d != nil { - d.Progress = req.Progress - r = d - } - - report := packp.NewReportStatus() - if err := report.Decode(r); err != nil { - return nil, err - } - - if err := report.Error(); err != nil { - defer s.Close() - return report, err - } - - return report, s.Command.Close() -} - -func (s *session) finish() error { - if s.finished { - return nil - } - - s.finished = true - - // If we did not run a upload/receive-pack, we close the connection - // gracefully by sending a flush packet to the server. If the server - // operates correctly, it will exit with status 0. - if !s.packRun { - _, err := s.Stdin.Write(pktline.FlushPkt) - return err - } - - return nil -} - -func (s *session) Close() (err error) { - err = s.finish() - - defer ioutil.CheckClose(s.Command, &err) - return -} - -func (s *session) checkNotFoundError() error { - t := time.NewTicker(time.Second * readErrorSecondsTimeout) - defer t.Stop() - - select { - case <-t.C: - return ErrTimeoutExceeded - case line, ok := <-s.firstErrLine: - if !ok { - return nil - } - - if isRepoNotFoundError(line) { - return transport.ErrRepositoryNotFound - } - - return fmt.Errorf("unknown error: %s", line) - } -} - -var ( - githubRepoNotFoundErr = "ERROR: Repository not found." - bitbucketRepoNotFoundErr = "conq: repository does not exist." - localRepoNotFoundErr = "does not appear to be a git repository" - gitProtocolNotFoundErr = "ERR \n Repository not found." - gitProtocolNoSuchErr = "ERR no such repository" - gitProtocolAccessDeniedErr = "ERR access denied" - gogsAccessDeniedErr = "Gogs: Repository does not exist or you do not have access" -) - -func isRepoNotFoundError(s string) bool { - if strings.HasPrefix(s, githubRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { - return true - } - - if strings.HasSuffix(s, localRepoNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNotFoundErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolNoSuchErr) { - return true - } - - if strings.HasPrefix(s, gitProtocolAccessDeniedErr) { - return true - } - - if strings.HasPrefix(s, gogsAccessDeniedErr) { - return true - } - - return false -} - -var ( - nak = []byte("NAK") - eol = []byte("\n") -) - -// uploadPack implements the git-upload-pack protocol. 
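
The upload-pack exchange implemented below is framed as pkt-lines; a tiny self-contained sketch of the framing used by the deleted sendDone helper and the pktline.FlushPkt write (the expected output is shown in the trailing comment):

package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/pktline"
)

func main() {
	var buf bytes.Buffer
	e := pktline.NewEncoder(&buf)
	_ = e.Encodef("done\n")            // length-prefixed payload
	_, _ = buf.Write(pktline.FlushPkt) // the "0000" flush-pkt
	fmt.Printf("%q\n", buf.String())   // "0009done\n0000"
}
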
-func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { - // TODO support multi_ack mode - // TODO support multi_ack_detailed mode - // TODO support acks for common objects - // TODO build a proper state machine for all these processing options - - if err := req.UploadRequest.Encode(w); err != nil { - return fmt.Errorf("sending upload-req message: %s", err) - } - - if err := req.UploadHaves.Encode(w, true); err != nil { - return fmt.Errorf("sending haves message: %s", err) - } - - if err := sendDone(w); err != nil { - return fmt.Errorf("sending done message: %s", err) - } - - if err := w.Close(); err != nil { - return fmt.Errorf("closing input: %s", err) - } - - return nil -} - -func sendDone(w io.Writer) error { - e := pktline.NewEncoder(w) - - return e.Encodef("done\n") -} - -// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse -func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( - *packp.UploadPackResponse, error, -) { - res := packp.NewUploadPackResponse(req) - if err := res.Decode(r); err != nil { - return nil, fmt.Errorf("error decoding upload-pack response: %s", err) - } - - return res, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go deleted file mode 100644 index e2480848a47..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go +++ /dev/null @@ -1,73 +0,0 @@ -package common - -import ( - "context" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ServerCommand is used for a single server command execution. 
-type ServerCommand struct { - Stderr io.Writer - Stdout io.WriteCloser - Stdin io.Reader -} - -func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { - ioutil.CheckClose(cmd.Stdout, &err) - - ar, err := s.AdvertisedReferences() - if err != nil { - return err - } - - if err := ar.Encode(cmd.Stdout); err != nil { - return err - } - - req := packp.NewUploadPackRequest() - if err := req.Decode(cmd.Stdin); err != nil { - return err - } - - var resp *packp.UploadPackResponse - resp, err = s.UploadPack(context.TODO(), req) - if err != nil { - return err - } - - return resp.Encode(cmd.Stdout) -} - -func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { - ar, err := s.AdvertisedReferences() - if err != nil { - return fmt.Errorf("internal error in advertised references: %s", err) - } - - if err := ar.Encode(cmd.Stdout); err != nil { - return fmt.Errorf("error in advertised references encoding: %s", err) - } - - req := packp.NewReferenceUpdateRequest() - if err := req.Decode(cmd.Stdin); err != nil { - return fmt.Errorf("error decoding: %s", err) - } - - rs, err := s.ReceivePack(context.TODO(), req) - if rs != nil { - if err := rs.Encode(cmd.Stdout); err != nil { - return fmt.Errorf("error in encoding report status %s", err) - } - } - - if err != nil { - return fmt.Errorf("error in receive pack: %s", err) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go deleted file mode 100644 index e7e2b075e5e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go +++ /dev/null @@ -1,64 +0,0 @@ -package server - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/storage/filesystem" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" -) - -// DefaultLoader is a filesystem loader ignoring host and resolving paths to /. -var DefaultLoader = NewFilesystemLoader(osfs.New("")) - -// Loader loads repository's storer.Storer based on an optional host and a path. -type Loader interface { - // Load loads a storer.Storer given a transport.Endpoint. - // Returns transport.ErrRepositoryNotFound if the repository does not - // exist. - Load(ep *transport.Endpoint) (storer.Storer, error) -} - -type fsLoader struct { - base billy.Filesystem -} - -// NewFilesystemLoader creates a Loader that ignores host and resolves paths -// with a given base filesystem. -func NewFilesystemLoader(base billy.Filesystem) Loader { - return &fsLoader{base} -} - -// Load looks up the endpoint's path in the base file system and returns a -// storer for it. Returns transport.ErrRepositoryNotFound if a repository does -// not exist in the given path. -func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { - fs, err := l.base.Chroot(ep.Path) - if err != nil { - return nil, err - } - - if _, err := fs.Stat("config"); err != nil { - return nil, transport.ErrRepositoryNotFound - } - - return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil -} - -// MapLoader is a Loader that uses a lookup map of storer.Storer by -// transport.Endpoint. -type MapLoader map[string]storer.Storer - -// Load returns a storer.Storer for given a transport.Endpoint by looking it up -// in the map. 
Returns transport.ErrRepositoryNotFound if the endpoint does not -// exist. -func (l MapLoader) Load(ep *transport.Endpoint) (storer.Storer, error) { - s, ok := l[ep.String()] - if !ok { - return nil, transport.ErrRepositoryNotFound - } - - return s, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go deleted file mode 100644 index 8ab70fe7072..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go +++ /dev/null @@ -1,432 +0,0 @@ -// Package server implements the git server protocol. For most use cases, the -// transport-specific implementations should be used. -package server - -import ( - "context" - "errors" - "fmt" - "io" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/revlist" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var DefaultServer = NewServer(DefaultLoader) - -type server struct { - loader Loader - handler *handler -} - -// NewServer returns a transport.Transport implementing a git server, -// independent of transport. Each transport must wrap this. -func NewServer(loader Loader) transport.Transport { - return &server{ - loader, - &handler{asClient: false}, - } -} - -// NewClient returns a transport.Transport implementing a client with an -// embedded server. -func NewClient(loader Loader) transport.Transport { - return &server{ - loader, - &handler{asClient: true}, - } -} - -func (s *server) NewUploadPackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { - sto, err := s.loader.Load(ep) - if err != nil { - return nil, err - } - - return s.handler.NewUploadPackSession(sto) -} - -func (s *server) NewReceivePackSession(ep *transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { - sto, err := s.loader.Load(ep) - if err != nil { - return nil, err - } - - return s.handler.NewReceivePackSession(sto) -} - -type handler struct { - asClient bool -} - -func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { - return &upSession{ - session: session{storer: s, asClient: h.asClient}, - }, nil -} - -func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { - return &rpSession{ - session: session{storer: s, asClient: h.asClient}, - cmdStatus: map[plumbing.ReferenceName]error{}, - }, nil -} - -type session struct { - storer storer.Storer - caps *capability.List - asClient bool -} - -func (s *session) Close() error { - return nil -} - -func (s *session) SetAuth(transport.AuthMethod) error { - //TODO: deprecate - return nil -} - -func (s *session) checkSupportedCapabilities(cl *capability.List) error { - for _, c := range cl.All() { - if !s.caps.Supports(c) { - return fmt.Errorf("unsupported capability: %s", c) - } - } - - return nil -} - -type upSession struct { - session -} - -func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return s.AdvertisedReferencesContext(context.TODO()) -} - -func (s *upSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { - ar := packp.NewAdvRefs() - - if err := 
s.setSupportedCapabilities(ar.Capabilities); err != nil { - return nil, err - } - - s.caps = ar.Capabilities - - if err := setReferences(s.storer, ar); err != nil { - return nil, err - } - - if err := setHEAD(s.storer, ar); err != nil { - return nil, err - } - - if s.asClient && len(ar.References) == 0 { - return nil, transport.ErrEmptyRemoteRepository - } - - return ar, nil -} - -func (s *upSession) UploadPack(ctx context.Context, req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { - if req.IsEmpty() { - return nil, transport.ErrEmptyUploadPackRequest - } - - if err := req.Validate(); err != nil { - return nil, err - } - - if s.caps == nil { - s.caps = capability.NewList() - if err := s.setSupportedCapabilities(s.caps); err != nil { - return nil, err - } - } - - if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { - return nil, err - } - - s.caps = req.Capabilities - - if len(req.Shallows) > 0 { - return nil, fmt.Errorf("shallow not supported") - } - - objs, err := s.objectsToUpload(req) - if err != nil { - return nil, err - } - - pr, pw := ioutil.Pipe() - e := packfile.NewEncoder(pw, s.storer, false) - go func() { - // TODO: plumb through a pack window. - _, err := e.Encode(objs, 10) - pw.CloseWithError(err) - }() - - return packp.NewUploadPackResponseWithPackfile(req, - ioutil.NewContextReadCloser(ctx, pr), - ), nil -} - -func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { - haves, err := revlist.Objects(s.storer, req.Haves, nil) - if err != nil { - return nil, err - } - - return revlist.Objects(s.storer, req.Wants, haves) -} - -func (*upSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { - return err - } - - if err := c.Set(capability.OFSDelta); err != nil { - return err - } - - return nil -} - -type rpSession struct { - session - cmdStatus map[plumbing.ReferenceName]error - firstErr error - unpackErr error -} - -func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { - return s.AdvertisedReferencesContext(context.TODO()) -} - -func (s *rpSession) AdvertisedReferencesContext(ctx context.Context) (*packp.AdvRefs, error) { - ar := packp.NewAdvRefs() - - if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { - return nil, err - } - - s.caps = ar.Capabilities - - if err := setReferences(s.storer, ar); err != nil { - return nil, err - } - - if err := setHEAD(s.storer, ar); err != nil { - return nil, err - } - - return ar, nil -} - -var ( - ErrUpdateReference = errors.New("failed to update ref") -) - -func (s *rpSession) ReceivePack(ctx context.Context, req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { - if s.caps == nil { - s.caps = capability.NewList() - if err := s.setSupportedCapabilities(s.caps); err != nil { - return nil, err - } - } - - if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { - return nil, err - } - - s.caps = req.Capabilities - - //TODO: Implement 'atomic' update of references. 
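
For context, the server package being deleted could serve repositories in-process through its Loader implementations; a small sketch assuming /srv/git/project.git is a bare repository on disk (both paths are assumptions):

package main

import (
	"fmt"
	"log"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/transport"
	"github.com/go-git/go-git/v5/plumbing/transport/server"
)

func main() {
	// The filesystem loader resolves endpoint paths under the given base.
	loader := server.NewFilesystemLoader(osfs.New("/srv/git"))
	srv := server.NewServer(loader)

	ep, err := transport.NewEndpoint("/project.git")
	if err != nil {
		log.Fatal(err)
	}
	up, err := srv.NewUploadPackSession(ep, nil)
	if err != nil {
		log.Fatal(err)
	}
	ar, err := up.AdvertisedReferences()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(ar.References), "references advertised")
}
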
- - if req.Packfile != nil { - r := ioutil.NewContextReadCloser(ctx, req.Packfile) - if err := s.writePackfile(r); err != nil { - s.unpackErr = err - s.firstErr = err - return s.reportStatus(), err - } - } - - s.updateReferences(req) - return s.reportStatus(), s.firstErr -} - -func (s *rpSession) updateReferences(req *packp.ReferenceUpdateRequest) { - for _, cmd := range req.Commands { - exists, err := referenceExists(s.storer, cmd.Name) - if err != nil { - s.setStatus(cmd.Name, err) - continue - } - - switch cmd.Action() { - case packp.Create: - if exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - ref := plumbing.NewHashReference(cmd.Name, cmd.New) - err := s.storer.SetReference(ref) - s.setStatus(cmd.Name, err) - case packp.Delete: - if !exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - err := s.storer.RemoveReference(cmd.Name) - s.setStatus(cmd.Name, err) - case packp.Update: - if !exists { - s.setStatus(cmd.Name, ErrUpdateReference) - continue - } - - ref := plumbing.NewHashReference(cmd.Name, cmd.New) - err := s.storer.SetReference(ref) - s.setStatus(cmd.Name, err) - } - } -} - -func (s *rpSession) writePackfile(r io.ReadCloser) error { - if r == nil { - return nil - } - - if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { - _ = r.Close() - return err - } - - return r.Close() -} - -func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { - s.cmdStatus[ref] = err - if s.firstErr == nil && err != nil { - s.firstErr = err - } -} - -func (s *rpSession) reportStatus() *packp.ReportStatus { - if !s.caps.Supports(capability.ReportStatus) { - return nil - } - - rs := packp.NewReportStatus() - rs.UnpackStatus = "ok" - - if s.unpackErr != nil { - rs.UnpackStatus = s.unpackErr.Error() - } - - if s.cmdStatus == nil { - return rs - } - - for ref, err := range s.cmdStatus { - msg := "ok" - if err != nil { - msg = err.Error() - } - status := &packp.CommandStatus{ - ReferenceName: ref, - Status: msg, - } - rs.CommandStatuses = append(rs.CommandStatuses, status) - } - - return rs -} - -func (*rpSession) setSupportedCapabilities(c *capability.List) error { - if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { - return err - } - - if err := c.Set(capability.OFSDelta); err != nil { - return err - } - - if err := c.Set(capability.DeleteRefs); err != nil { - return err - } - - return c.Set(capability.ReportStatus) -} - -func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { - ref, err := s.Reference(plumbing.HEAD) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - - if ref.Type() == plumbing.SymbolicReference { - if err := ar.AddReference(ref); err != nil { - return nil - } - - ref, err = storer.ResolveReference(s, ref.Target()) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - } - - if ref.Type() != plumbing.HashReference { - return plumbing.ErrInvalidType - } - - h := ref.Hash() - ar.Head = &h - - return nil -} - -func setReferences(s storer.Storer, ar *packp.AdvRefs) error { - //TODO: add peeled references. 
- iter, err := s.IterReferences() - if err != nil { - return err - } - - return iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - ar.References[ref.Name().String()] = ref.Hash() - return nil - }) -} - -func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { - _, err := s.Reference(n) - if err == plumbing.ErrReferenceNotFound { - return false, nil - } - - return err == nil, err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go deleted file mode 100644 index 35146695462..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go +++ /dev/null @@ -1,308 +0,0 @@ -package ssh - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "os/user" - "path/filepath" - - "github.com/go-git/go-git/v5/plumbing/transport" - - "github.com/mitchellh/go-homedir" - sshagent "github.com/xanzy/ssh-agent" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/knownhosts" -) - -const DefaultUsername = "git" - -// AuthMethod is the interface all auth methods for the ssh client -// must implement. The clientConfig method returns the ssh client -// configuration needed to establish an ssh connection. -type AuthMethod interface { - transport.AuthMethod - // ClientConfig should return a valid ssh.ClientConfig to be used to create - // a connection to the SSH server. - ClientConfig() (*ssh.ClientConfig, error) -} - -// The names of the AuthMethod implementations. To be returned by the -// Name() method. Most git servers only allow PublicKeysName and -// PublicKeysCallbackName. -const ( - KeyboardInteractiveName = "ssh-keyboard-interactive" - PasswordName = "ssh-password" - PasswordCallbackName = "ssh-password-callback" - PublicKeysName = "ssh-public-keys" - PublicKeysCallbackName = "ssh-public-key-callback" -) - -// KeyboardInteractive implements AuthMethod by using a -// prompt/response sequence controlled by the server. -type KeyboardInteractive struct { - User string - Challenge ssh.KeyboardInteractiveChallenge - HostKeyCallbackHelper -} - -func (a *KeyboardInteractive) Name() string { - return KeyboardInteractiveName -} - -func (a *KeyboardInteractive) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ - a.Challenge, - }, - }) -} - -// Password implements AuthMethod by using the given password. -type Password struct { - User string - Password string - HostKeyCallbackHelper -} - -func (a *Password) Name() string { - return PasswordName -} - -func (a *Password) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *Password) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.Password(a.Password)}, - }) -} - -// PasswordCallback implements AuthMethod by using a callback -// to fetch the password. 
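
The Password method removed below was typically handed straight to the porcelain API; a brief sketch (the URL, destination path, and environment variable are assumptions):

package main

import (
	"log"
	"os"

	git "github.com/go-git/go-git/v5"
	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	auth := &gitssh.Password{
		User:     "git",
		Password: os.Getenv("GIT_SSH_PASSWORD"), // assumed env var
	}
	_, err := git.PlainClone("/tmp/project", false, &git.CloneOptions{
		URL:  "ssh://git@example.com/team/project.git", // assumed URL
		Auth: auth,
	})
	if err != nil {
		log.Fatal(err)
	}
}
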
-type PasswordCallback struct { - User string - Callback func() (pass string, err error) - HostKeyCallbackHelper -} - -func (a *PasswordCallback) Name() string { - return PasswordCallbackName -} - -func (a *PasswordCallback) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, - }) -} - -// PublicKeys implements AuthMethod by using the given key pairs. -type PublicKeys struct { - User string - Signer ssh.Signer - HostKeyCallbackHelper -} - -// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An -// encryption password should be given if the pemBytes contains a password -// encrypted PEM block otherwise password should be empty. It supports RSA -// (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. -func NewPublicKeys(user string, pemBytes []byte, password string) (*PublicKeys, error) { - signer, err := ssh.ParsePrivateKey(pemBytes) - if _, ok := err.(*ssh.PassphraseMissingError); ok { - signer, err = ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte(password)) - } - if err != nil { - return nil, err - } - return &PublicKeys{User: user, Signer: signer}, nil -} - -// NewPublicKeysFromFile returns a PublicKeys from a file containing a PEM -// encoded private key. An encryption password should be given if the pemBytes -// contains a password encrypted PEM block otherwise password should be empty. -func NewPublicKeysFromFile(user, pemFile, password string) (*PublicKeys, error) { - bytes, err := ioutil.ReadFile(pemFile) - if err != nil { - return nil, err - } - - return NewPublicKeys(user, bytes, password) -} - -func (a *PublicKeys) Name() string { - return PublicKeysName -} - -func (a *PublicKeys) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, - }) -} - -func username() (string, error) { - var username string - if user, err := user.Current(); err == nil { - username = user.Username - } else { - username = os.Getenv("USER") - } - - if username == "" { - return "", errors.New("failed to get username") - } - - return username, nil -} - -// PublicKeysCallback implements AuthMethod by asking a -// ssh.agent.Agent to act as a signer. -type PublicKeysCallback struct { - User string - Callback func() (signers []ssh.Signer, err error) - HostKeyCallbackHelper -} - -// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens -// a pipe with the SSH agent and uses the pipe as the implementer of the public -// key callback function. 
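
The NewPublicKeysFromFile helper deleted here was the usual way to authenticate with a key file; a minimal sketch (the key path and URL are assumptions, and the empty password presumes an unencrypted PEM block):

package main

import (
	"log"

	git "github.com/go-git/go-git/v5"
	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	auth, err := gitssh.NewPublicKeysFromFile("git", "/home/me/.ssh/id_rsa", "")
	if err != nil {
		log.Fatal(err)
	}
	_, err = git.PlainClone("/tmp/project", false, &git.CloneOptions{
		URL:  "git@example.com:team/project.git", // assumed URL
		Auth: auth,
	})
	if err != nil {
		log.Fatal(err)
	}
}
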
-func NewSSHAgentAuth(u string) (*PublicKeysCallback, error) { - var err error - if u == "" { - u, err = username() - if err != nil { - return nil, err - } - } - - a, _, err := sshagent.New() - if err != nil { - return nil, fmt.Errorf("error creating SSH agent: %q", err) - } - - return &PublicKeysCallback{ - User: u, - Callback: a.Signers, - }, nil -} - -func (a *PublicKeysCallback) Name() string { - return PublicKeysCallbackName -} - -func (a *PublicKeysCallback) String() string { - return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) -} - -func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) { - return a.SetHostKeyCallback(&ssh.ClientConfig{ - User: a.User, - Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, - }) -} - -// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a -// known_hosts file. http://man.openbsd.org/sshd#SSH_KNOWN_HOSTS_FILE_FORMAT -// -// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS -// environment variable, example: -// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file -// -// If SSH_KNOWN_HOSTS is not set the following file locations will be used: -// ~/.ssh/known_hosts -// /etc/ssh/ssh_known_hosts -func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) { - var err error - - if len(files) == 0 { - if files, err = getDefaultKnownHostsFiles(); err != nil { - return nil, err - } - } - - if files, err = filterKnownHostsFiles(files...); err != nil { - return nil, err - } - - return knownhosts.New(files...) -} - -func getDefaultKnownHostsFiles() ([]string, error) { - files := filepath.SplitList(os.Getenv("SSH_KNOWN_HOSTS")) - if len(files) != 0 { - return files, nil - } - - homeDirPath, err := homedir.Dir() - if err != nil { - return nil, err - } - - return []string{ - filepath.Join(homeDirPath, "/.ssh/known_hosts"), - "/etc/ssh/ssh_known_hosts", - }, nil -} - -func filterKnownHostsFiles(files ...string) ([]string, error) { - var out []string - for _, file := range files { - _, err := os.Stat(file) - if err == nil { - out = append(out, file) - continue - } - - if !os.IsNotExist(err) { - return nil, err - } - } - - if len(out) == 0 { - return nil, fmt.Errorf("unable to find any valid known_hosts file, set SSH_KNOWN_HOSTS env variable") - } - - return out, nil -} - -// HostKeyCallbackHelper is a helper that provides common functionality to -// configure HostKeyCallback into a ssh.ClientConfig. -type HostKeyCallbackHelper struct { - // HostKeyCallback is the function type used for verifying server keys. - // If nil default callback will be create using NewKnownHostsCallback - // without argument. - HostKeyCallback ssh.HostKeyCallback -} - -// SetHostKeyCallback sets the field HostKeyCallback in the given cfg. If -// HostKeyCallback is empty a default callback is created using -// NewKnownHostsCallback. 
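
Combining the deleted agent-based callback with an explicit known_hosts callback looked roughly like this; a sketch assuming a running SSH agent (SSH_AUTH_SOCK) and the listed known_hosts path:

package main

import (
	"log"

	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	// Assumed location; with no arguments the callback falls back to
	// SSH_KNOWN_HOSTS and the default files described above.
	cb, err := gitssh.NewKnownHostsCallback("/etc/ssh/ssh_known_hosts")
	if err != nil {
		log.Fatal(err)
	}
	auth, err := gitssh.NewSSHAgentAuth("git")
	if err != nil {
		log.Fatal(err)
	}
	auth.HostKeyCallback = cb // promoted from the embedded HostKeyCallbackHelper
	_ = auth                  // pass as Auth in CloneOptions/FetchOptions/PushOptions
}
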
-func (m *HostKeyCallbackHelper) SetHostKeyCallback(cfg *ssh.ClientConfig) (*ssh.ClientConfig, error) { - var err error - if m.HostKeyCallback == nil { - if m.HostKeyCallback, err = NewKnownHostsCallback(); err != nil { - return cfg, err - } - } - - cfg.HostKeyCallback = m.HostKeyCallback - return cfg, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go deleted file mode 100644 index 46e79134c33..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ /dev/null @@ -1,235 +0,0 @@ -// Package ssh implements the SSH transport protocol. -package ssh - -import ( - "context" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/internal/common" - - "github.com/kevinburke/ssh_config" - "golang.org/x/crypto/ssh" - "golang.org/x/net/proxy" -) - -// DefaultClient is the default SSH client. -var DefaultClient = NewClient(nil) - -// DefaultSSHConfig is the reader used to access parameters stored in the -// system's ssh_config files. If nil all the ssh_config are ignored. -var DefaultSSHConfig sshConfig = ssh_config.DefaultUserSettings - -type sshConfig interface { - Get(alias, key string) string -} - -// NewClient creates a new SSH client with an optional *ssh.ClientConfig. -func NewClient(config *ssh.ClientConfig) transport.Transport { - return common.NewClient(&runner{config: config}) -} - -// DefaultAuthBuilder is the function used to create a default AuthMethod, when -// the user doesn't provide any. -var DefaultAuthBuilder = func(user string) (AuthMethod, error) { - return NewSSHAgentAuth(user) -} - -const DefaultPort = 22 - -type runner struct { - config *ssh.ClientConfig -} - -func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { - c := &command{command: cmd, endpoint: ep, config: r.config} - if auth != nil { - c.setAuth(auth) - } - - if err := c.connect(); err != nil { - return nil, err - } - return c, nil -} - -type command struct { - *ssh.Session - connected bool - command string - endpoint *transport.Endpoint - client *ssh.Client - auth AuthMethod - config *ssh.ClientConfig -} - -func (c *command) setAuth(auth transport.AuthMethod) error { - a, ok := auth.(AuthMethod) - if !ok { - return transport.ErrInvalidAuthMethod - } - - c.auth = a - return nil -} - -func (c *command) Start() error { - return c.Session.Start(endpointToCommand(c.command, c.endpoint)) -} - -// Close closes the SSH session and connection. -func (c *command) Close() error { - if !c.connected { - return nil - } - - c.connected = false - - //XXX: If did read the full packfile, then the session might be already - // closed. - _ = c.Session.Close() - err := c.client.Close() - - //XXX: in go1.16+ we can use errors.Is(err, net.ErrClosed) - if err != nil && strings.HasSuffix(err.Error(), "use of closed network connection") { - return nil - } - - return err -} - -// connect connects to the SSH server, unless a AuthMethod was set with -// SetAuth method, by default uses an auth method based on PublicKeysCallback, -// it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK -// environment var. 
-func (c *command) connect() error { - if c.connected { - return transport.ErrAlreadyConnected - } - - if c.auth == nil { - if err := c.setAuthFromEndpoint(); err != nil { - return err - } - } - - var err error - config, err := c.auth.ClientConfig() - if err != nil { - return err - } - - overrideConfig(c.config, config) - - c.client, err = dial("tcp", c.getHostWithPort(), config) - if err != nil { - return err - } - - c.Session, err = c.client.NewSession() - if err != nil { - _ = c.client.Close() - return err - } - - c.connected = true - return nil -} - -func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) { - var ( - ctx = context.Background() - cancel context.CancelFunc - ) - if config.Timeout > 0 { - ctx, cancel = context.WithTimeout(ctx, config.Timeout) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - - conn, err := proxy.Dial(ctx, network, addr) - if err != nil { - return nil, err - } - c, chans, reqs, err := ssh.NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return ssh.NewClient(c, chans, reqs), nil -} - -func (c *command) getHostWithPort() string { - if addr, found := c.doGetHostWithPortFromSSHConfig(); found { - return addr - } - - host := c.endpoint.Host - port := c.endpoint.Port - if port <= 0 { - port = DefaultPort - } - - return fmt.Sprintf("%s:%d", host, port) -} - -func (c *command) doGetHostWithPortFromSSHConfig() (addr string, found bool) { - if DefaultSSHConfig == nil { - return - } - - host := c.endpoint.Host - port := c.endpoint.Port - - configHost := DefaultSSHConfig.Get(c.endpoint.Host, "Hostname") - if configHost != "" { - host = configHost - found = true - } - - if !found { - return - } - - configPort := DefaultSSHConfig.Get(c.endpoint.Host, "Port") - if configPort != "" { - if i, err := strconv.Atoi(configPort); err == nil { - port = i - } - } - - addr = fmt.Sprintf("%s:%d", host, port) - return -} - -func (c *command) setAuthFromEndpoint() error { - var err error - c.auth, err = DefaultAuthBuilder(c.endpoint.User) - return err -} - -func endpointToCommand(cmd string, ep *transport.Endpoint) string { - return fmt.Sprintf("%s '%s'", cmd, ep.Path) -} - -func overrideConfig(overrides *ssh.ClientConfig, c *ssh.ClientConfig) { - if overrides == nil { - return - } - - t := reflect.TypeOf(*c) - vc := reflect.ValueOf(c).Elem() - vo := reflect.ValueOf(overrides).Elem() - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - vcf := vc.FieldByName(f.Name) - vof := vo.FieldByName(f.Name) - vcf.Set(vof) - } - - *c = vc.Interface().(ssh.ClientConfig) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/prune.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/prune.go deleted file mode 100644 index cc5907a1429..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/prune.go +++ /dev/null @@ -1,66 +0,0 @@ -package git - -import ( - "errors" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -type PruneHandler func(unreferencedObjectHash plumbing.Hash) error -type PruneOptions struct { - // OnlyObjectsOlderThan if set to non-zero value - // selects only objects older than the time provided. - OnlyObjectsOlderThan time.Time - // Handler is called on matching objects - Handler PruneHandler -} - -var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported") - -// DeleteObject deletes an object from a repository. -// The type conveniently matches PruneHandler. 
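
Since the deleted DeleteObject deliberately matches the PruneHandler signature, wiring the two together is the natural use; a sketch (the repository path and age cutoff are assumptions):

package main

import (
	"log"
	"time"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/project") // assumed path
	if err != nil {
		log.Fatal(err)
	}
	err = repo.Prune(git.PruneOptions{
		OnlyObjectsOlderThan: time.Now().Add(-14 * 24 * time.Hour),
		Handler:              repo.DeleteObject, // matches PruneHandler
	})
	if err != nil {
		log.Fatal(err)
	}
}
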
-func (r *Repository) DeleteObject(hash plumbing.Hash) error { - los, ok := r.Storer.(storer.LooseObjectStorer) - if !ok { - return ErrLooseObjectsNotSupported - } - - return los.DeleteLooseObject(hash) -} - -func (r *Repository) Prune(opt PruneOptions) error { - los, ok := r.Storer.(storer.LooseObjectStorer) - if !ok { - return ErrLooseObjectsNotSupported - } - - pw := newObjectWalker(r.Storer) - err := pw.walkAllRefs() - if err != nil { - return err - } - // Now walk all (loose) objects in storage. - return los.ForEachObjectHash(func(hash plumbing.Hash) error { - // Get out if we have seen this object. - if pw.isSeen(hash) { - return nil - } - // Otherwise it is a candidate for pruning. - // Check out for too new objects next. - if !opt.OnlyObjectsOlderThan.IsZero() { - // Errors here are non-fatal. The object may be e.g. packed. - // Or concurrently deleted. Skip such objects. - t, err := los.LooseObjectTime(hash) - if err != nil { - return nil - } - // Skip too new objects. - if !t.Before(opt.OnlyObjectsOlderThan) { - return nil - } - } - return opt.Handler(hash) - }) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/references.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/references.go deleted file mode 100644 index 6d96035afab..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/references.go +++ /dev/null @@ -1,264 +0,0 @@ -package git - -import ( - "io" - "sort" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/diff" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -// References returns a slice of Commits for the file at "path", starting from -// the commit provided that contains the file from the provided path. The last -// commit into the returned slice is the commit where the file was created. -// If the provided commit does not contains the specified path, a nil slice is -// returned. The commits are sorted in commit order, newer to older. -// -// Caveats: -// -// - Moves and copies are not currently supported. -// -// - Cherry-picks are not detected unless there are no commits between them and -// therefore can appear repeated in the list. (see git path-id for hints on how -// to fix this). -func references(c *object.Commit, path string) ([]*object.Commit, error) { - var result []*object.Commit - seen := make(map[plumbing.Hash]struct{}) - if err := walkGraph(&result, &seen, c, path); err != nil { - return nil, err - } - - // TODO result should be returned without ordering - sortCommits(result) - - // for merges of identical cherry-picks - return removeComp(path, result, equivalent) -} - -type commitSorterer struct { - l []*object.Commit -} - -func (s commitSorterer) Len() int { - return len(s.l) -} - -func (s commitSorterer) Less(i, j int) bool { - return s.l[i].Committer.When.Before(s.l[j].Committer.When) || - s.l[i].Committer.When.Equal(s.l[j].Committer.When) && - s.l[i].Author.When.Before(s.l[j].Author.When) -} - -func (s commitSorterer) Swap(i, j int) { - s.l[i], s.l[j] = s.l[j], s.l[i] -} - -// SortCommits sorts a commit list by commit date, from older to newer. -func sortCommits(l []*object.Commit) { - s := &commitSorterer{l} - sort.Sort(s) -} - -// Recursive traversal of the commit graph, generating a linear history of the -// path. 
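
The deleted references walk computes a per-file history; at the porcelain level a similar history can be obtained from Log with a FileName filter, sketched here (the repository location and file path are assumptions):

package main

import (
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed: run from inside a repository
	if err != nil {
		log.Fatal(err)
	}
	fileName := "go.mod" // assumed path within the work tree
	iter, err := repo.Log(&git.LogOptions{FileName: &fileName})
	if err != nil {
		log.Fatal(err)
	}
	err = iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash, c.Author.When.Format("2006-01-02"))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
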
-func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { - // check and update seen - if _, ok := (*seen)[current.Hash]; ok { - return nil - } - (*seen)[current.Hash] = struct{}{} - - // if the path is not in the current commit, stop searching. - if _, err := current.File(path); err != nil { - return nil - } - - // optimization: don't traverse branches that does not - // contain the path. - parents, err := parentsContainingPath(path, current) - if err != nil { - return err - } - switch len(parents) { - // if the path is not found in any of its parents, the path was - // created by this commit; we must add it to the revisions list and - // stop searching. This includes the case when current is the - // initial commit. - case 0: - *result = append(*result, current) - return nil - case 1: // only one parent contains the path - // if the file contents has change, add the current commit - different, err := differentContents(path, current, parents) - if err != nil { - return err - } - if len(different) == 1 { - *result = append(*result, current) - } - // in any case, walk the parent - return walkGraph(result, seen, parents[0], path) - default: // more than one parent contains the path - // TODO: detect merges that had a conflict, because they must be - // included in the result here. - for _, p := range parents { - err := walkGraph(result, seen, p, path) - if err != nil { - return err - } - } - } - return nil -} - -func parentsContainingPath(path string, c *object.Commit) ([]*object.Commit, error) { - // TODO: benchmark this method making git.object.Commit.parent public instead of using - // an iterator - var result []*object.Commit - iter := c.Parents() - for { - parent, err := iter.Next() - if err == io.EOF { - return result, nil - } - if err != nil { - return nil, err - } - if _, err := parent.File(path); err == nil { - result = append(result, parent) - } - } -} - -// Returns an slice of the commits in "cs" that has the file "path", but with different -// contents than what can be found in "c". -func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - h, found := blobHash(path, c) - if !found { - return nil, object.ErrFileNotFound - } - for _, cx := range cs { - if hx, found := blobHash(path, cx); found && h != hx { - result = append(result, cx) - } - } - return result, nil -} - -// blobHash returns the hash of a path in a commit -func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { - file, err := commit.File(path) - if err != nil { - var empty plumbing.Hash - return empty, found - } - return file.Hash, true -} - -type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) - -// Returns a new slice of commits, with duplicates removed. Expects a -// sorted commit list. Duplication is defined according to "comp". It -// will always keep the first commit of a series of duplicated commits. -func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { - result := make([]*object.Commit, 0, len(cs)) - if len(cs) == 0 { - return result, nil - } - result = append(result, cs[0]) - for i := 1; i < len(cs); i++ { - equals, err := comp(path, cs[i], cs[i-1]) - if err != nil { - return nil, err - } - if !equals { - result = append(result, cs[i]) - } - } - return result, nil -} - -// Equivalent commits are commits whose patch is the same. 
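
The content comparison below relies on the utils/diff wrapper around go-diff; a self-contained sketch of that helper on toy inputs:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/utils/diff"
	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	ds := diff.Do("a\nb\n", "a\nc\n")
	for _, d := range ds {
		switch d.Type {
		case diffmatchpatch.DiffDelete:
			fmt.Printf("- %q\n", d.Text)
		case diffmatchpatch.DiffInsert:
			fmt.Printf("+ %q\n", d.Text)
		default:
			fmt.Printf("  %q\n", d.Text)
		}
	}
}
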
-func equivalent(path string, a, b *object.Commit) (bool, error) { - numParentsA := a.NumParents() - numParentsB := b.NumParents() - - // the first commit is not equivalent to anyone - // and "I think" merges can not be equivalent to anything - if numParentsA != 1 || numParentsB != 1 { - return false, nil - } - - diffsA, err := patch(a, path) - if err != nil { - return false, err - } - diffsB, err := patch(b, path) - if err != nil { - return false, err - } - - return sameDiffs(diffsA, diffsB), nil -} - -func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { - // get contents of the file in the commit - file, err := c.File(path) - if err != nil { - return nil, err - } - content, err := file.Contents() - if err != nil { - return nil, err - } - - // get contents of the file in the first parent of the commit - var contentParent string - iter := c.Parents() - parent, err := iter.Next() - if err != nil { - return nil, err - } - file, err = parent.File(path) - if err != nil { - contentParent = "" - } else { - contentParent, err = file.Contents() - if err != nil { - return nil, err - } - } - - // compare the contents of parent and child - return diff.Do(content, contentParent), nil -} - -func sameDiffs(a, b []diffmatchpatch.Diff) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !sameDiff(a[i], b[i]) { - return false - } - } - return true -} - -func sameDiff(a, b diffmatchpatch.Diff) bool { - if a.Type != b.Type { - return false - } - switch a.Type { - case 0: - return countLines(a.Text) == countLines(b.Text) - case 1, -1: - return a.Text == b.Text - default: - panic("unreachable") - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/remote.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/remote.go deleted file mode 100644 index 4a06106c536..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/remote.go +++ /dev/null @@ -1,1262 +0,0 @@ -package git - -import ( - "context" - "errors" - "fmt" - "io" - "time" - - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/protocol/packp" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability" - "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" - "github.com/go-git/go-git/v5/plumbing/revlist" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/plumbing/transport" - "github.com/go-git/go-git/v5/plumbing/transport/client" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem" - "github.com/go-git/go-git/v5/storage/memory" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -var ( - NoErrAlreadyUpToDate = errors.New("already up-to-date") - ErrDeleteRefNotSupported = errors.New("server does not support delete-refs") - ErrForceNeeded = errors.New("some refs were not updated") - ErrExactSHA1NotSupported = errors.New("server does not support exact SHA1 refspec") -) - -type NoMatchingRefSpecError struct { - refSpec config.RefSpec -} - -func (e NoMatchingRefSpecError) Error() string { - return fmt.Sprintf("couldn't find remote ref %q", e.refSpec.Src()) -} - -func (e NoMatchingRefSpecError) Is(target error) bool { - _, ok := target.(NoMatchingRefSpecError) - return ok -} - -const ( - // This describes the maximum number of commits to walk when - // computing the haves to 
send to a server, for each ref in the - // repo containing this remote, when not using the multi-ack - // protocol. Setting this to 0 means there is no limit. - maxHavesToVisitPerRef = 100 -) - -// Remote represents a connection to a remote repository. -type Remote struct { - c *config.RemoteConfig - s storage.Storer -} - -// NewRemote creates a new Remote. -// The intended purpose is to use the Remote for tasks such as listing remote references (like using git ls-remote). -// Otherwise Remotes should be created via the use of a Repository. -func NewRemote(s storage.Storer, c *config.RemoteConfig) *Remote { - return &Remote{s: s, c: c} -} - -// Config returns the RemoteConfig object used to instantiate this Remote. -func (r *Remote) Config() *config.RemoteConfig { - return r.c -} - -func (r *Remote) String() string { - var fetch, push string - if len(r.c.URLs) > 0 { - fetch = r.c.URLs[0] - push = r.c.URLs[0] - } - - return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push) -} - -// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if the -// remote was already up-to-date. -func (r *Remote) Push(o *PushOptions) error { - return r.PushContext(context.Background(), o) -} - -// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) { - if err := o.Validate(); err != nil { - return err - } - - if o.RemoteName != r.c.Name { - return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) - } - - s, err := newSendPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) - if err != nil { - return err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferencesContext(ctx) - if err != nil { - return err - } - - remoteRefs, err := ar.AllReferences() - if err != nil { - return err - } - - if err := r.checkRequireRemoteRefs(o.RequireRemoteRefs, remoteRefs); err != nil { - return err - } - - isDelete := false - allDelete := true - for _, rs := range o.RefSpecs { - if rs.IsDelete() { - isDelete = true - } else { - allDelete = false - } - if isDelete && !allDelete { - break - } - } - - if isDelete && !ar.Capabilities.Supports(capability.DeleteRefs) { - return ErrDeleteRefNotSupported - } - - if o.Force { - for i := 0; i < len(o.RefSpecs); i++ { - rs := &o.RefSpecs[i] - if !rs.IsForceUpdate() && !rs.IsDelete() { - o.RefSpecs[i] = config.RefSpec("+" + rs.String()) - } - } - } - - localRefs, err := r.references() - if err != nil { - return err - } - - req, err := r.newReferenceUpdateRequest(o, localRefs, remoteRefs, ar) - if err != nil { - return err - } - - if len(req.Commands) == 0 { - return NoErrAlreadyUpToDate - } - - objects := objectsToPush(req.Commands) - - haves, err := referencesToHashes(remoteRefs) - if err != nil { - return err - } - - stop, err := r.s.Shallow() - if err != nil { - return err - } - - // if we have shallow we should include this as part of the objects that - // we are aware. - haves = append(haves, stop...) - - var hashesToPush []plumbing.Hash - // Avoid the expensive revlist operation if we're only doing deletes. 
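
Delete-only pushes, handled above, originate from refspecs with an empty source side; a porcelain-level sketch (the remote and branch names are assumptions):

package main

import (
	"log"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed: run from inside a repository
	if err != nil {
		log.Fatal(err)
	}
	// An empty source makes the refspec a delete (rs.IsDelete() above).
	err = repo.Push(&git.PushOptions{
		RemoteName: "origin",
		RefSpecs:   []config.RefSpec{config.RefSpec(":refs/heads/stale-branch")},
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		log.Fatal(err)
	}
}
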
- if !allDelete { - if r.c.IsFirstURLLocal() { - // If we're are pushing to a local repo, it might be much - // faster to use a local storage layer to get the commits - // to ignore, when calculating the object revlist. - localStorer := filesystem.NewStorage( - osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault()) - hashesToPush, err = revlist.ObjectsWithStorageForIgnores( - r.s, localStorer, objects, haves) - } else { - hashesToPush, err = revlist.Objects(r.s, objects, haves) - } - if err != nil { - return err - } - } - - if len(hashesToPush) == 0 { - allDelete = true - for _, command := range req.Commands { - if command.Action() != packp.Delete { - allDelete = false - break - } - } - } - - rs, err := pushHashes(ctx, s, r.s, req, hashesToPush, r.useRefDeltas(ar), allDelete) - if err != nil { - return err - } - - if err = rs.Error(); err != nil { - return err - } - - return r.updateRemoteReferenceStorage(req, rs) -} - -func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool { - return !ar.Capabilities.Supports(capability.OFSDelta) -} - -func (r *Remote) newReferenceUpdateRequest( - o *PushOptions, - localRefs []*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - ar *packp.AdvRefs, -) (*packp.ReferenceUpdateRequest, error) { - req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) - - if o.Progress != nil { - req.Progress = o.Progress - if ar.Capabilities.Supports(capability.Sideband64k) { - _ = req.Capabilities.Set(capability.Sideband64k) - } else if ar.Capabilities.Supports(capability.Sideband) { - _ = req.Capabilities.Set(capability.Sideband) - } - } - - if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil { - return nil, err - } - - return req, nil -} - -func (r *Remote) updateRemoteReferenceStorage( - req *packp.ReferenceUpdateRequest, - result *packp.ReportStatus, -) error { - - for _, spec := range r.c.Fetch { - for _, c := range req.Commands { - if !spec.Match(c.Name) { - continue - } - - local := spec.Dst(c.Name) - ref := plumbing.NewHashReference(local, c.New) - switch c.Action() { - case packp.Create, packp.Update: - if err := r.s.SetReference(ref); err != nil { - return err - } - case packp.Delete: - if err := r.s.RemoveReference(local); err != nil { - return err - } - } - } - } - - return nil -} - -// FetchContext fetches references along with the objects necessary to complete -// their histories. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (r *Remote) FetchContext(ctx context.Context, o *FetchOptions) error { - _, err := r.fetch(ctx, o) - return err -} - -// Fetch fetches references along with the objects necessary to complete their -// histories. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. 
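
At the call site, Fetch's NoErrAlreadyUpToDate contract reads like this; a short sketch (the remote name and repository location are assumptions):

package main

import (
	"errors"
	"fmt"
	"log"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".") // assumed: run from inside a repository
	if err != nil {
		log.Fatal(err)
	}
	err = repo.Fetch(&git.FetchOptions{RemoteName: "origin"})
	switch {
	case errors.Is(err, git.NoErrAlreadyUpToDate):
		fmt.Println("already up-to-date")
	case err != nil:
		log.Fatal(err)
	}
}
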
-func (r *Remote) Fetch(o *FetchOptions) error { - return r.FetchContext(context.Background(), o) -} - -func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.ReferenceStorer, err error) { - if o.RemoteName == "" { - o.RemoteName = r.c.Name - } - - if err = o.Validate(); err != nil { - return nil, err - } - - if len(o.RefSpecs) == 0 { - o.RefSpecs = r.c.Fetch - } - - s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferencesContext(ctx) - if err != nil { - return nil, err - } - - req, err := r.newUploadPackRequest(o, ar) - if err != nil { - return nil, err - } - - if err := r.isSupportedRefSpec(o.RefSpecs, ar); err != nil { - return nil, err - } - - remoteRefs, err := ar.AllReferences() - if err != nil { - return nil, err - } - - localRefs, err := r.references() - if err != nil { - return nil, err - } - - refs, err := calculateRefs(o.RefSpecs, remoteRefs, o.Tags) - if err != nil { - return nil, err - } - - if !req.Depth.IsZero() { - req.Shallows, err = r.s.Shallow() - if err != nil { - return nil, fmt.Errorf("existing checkout is not shallow") - } - } - - req.Wants, err = getWants(r.s, refs) - if len(req.Wants) > 0 { - req.Haves, err = getHaves(localRefs, remoteRefs, r.s) - if err != nil { - return nil, err - } - - if err = r.fetchPack(ctx, o, s, req); err != nil { - return nil, err - } - } - - updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, o.Tags, o.Force) - if err != nil { - return nil, err - } - - if !updated { - updated, err = depthChanged(req.Shallows, r.s) - if err != nil { - return nil, fmt.Errorf("error checking depth change: %v", err) - } - } - - if !updated { - return remoteRefs, NoErrAlreadyUpToDate - } - - return remoteRefs, nil -} - -func depthChanged(before []plumbing.Hash, s storage.Storer) (bool, error) { - after, err := s.Shallow() - if err != nil { - return false, err - } - - if len(before) != len(after) { - return true, nil - } - - bm := make(map[plumbing.Hash]bool, len(before)) - for _, b := range before { - bm[b] = true - } - for _, a := range after { - if _, ok := bm[a]; !ok { - return true, nil - } - } - - return false, nil -} - -func newUploadPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.UploadPackSession, error) { - c, ep, err := newClient(url, auth, insecure, cabundle) - if err != nil { - return nil, err - } - - return c.NewUploadPackSession(ep, auth) -} - -func newSendPackSession(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.ReceivePackSession, error) { - c, ep, err := newClient(url, auth, insecure, cabundle) - if err != nil { - return nil, err - } - - return c.NewReceivePackSession(ep, auth) -} - -func newClient(url string, auth transport.AuthMethod, insecure bool, cabundle []byte) (transport.Transport, *transport.Endpoint, error) { - ep, err := transport.NewEndpoint(url) - if err != nil { - return nil, nil, err - } - ep.InsecureSkipTLS = insecure - ep.CaBundle = cabundle - - c, err := client.NewClient(ep) - if err != nil { - return nil, nil, err - } - - return c, ep, err -} - -func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.UploadPackSession, - req *packp.UploadPackRequest) (err error) { - - reader, err := s.UploadPack(ctx, req) - if err != nil { - return err - } - - defer ioutil.CheckClose(reader, &err) - - if err = r.updateShallow(o, reader); err != nil { - return 
err - } - - if err = packfile.UpdateObjectStorage(r.s, - buildSidebandIfSupported(req.Capabilities, reader, o.Progress), - ); err != nil { - return err - } - - return err -} - -func (r *Remote) addReferencesToUpdate( - refspecs []config.RefSpec, - localRefs []*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - req *packp.ReferenceUpdateRequest, - prune bool, -) error { - // This references dictionary will be used to search references by name. - refsDict := make(map[string]*plumbing.Reference) - for _, ref := range localRefs { - refsDict[ref.Name().String()] = ref - } - - for _, rs := range refspecs { - if rs.IsDelete() { - if err := r.deleteReferences(rs, remoteRefs, refsDict, req, false); err != nil { - return err - } - } else { - err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req) - if err != nil { - return err - } - - if prune { - if err := r.deleteReferences(rs, remoteRefs, refsDict, req, true); err != nil { - return err - } - } - } - } - - return nil -} - -func (r *Remote) addOrUpdateReferences( - rs config.RefSpec, - localRefs []*plumbing.Reference, - refsDict map[string]*plumbing.Reference, - remoteRefs storer.ReferenceStorer, - req *packp.ReferenceUpdateRequest, -) error { - // If it is not a wilcard refspec we can directly search for the reference - // in the references dictionary. - if !rs.IsWildcard() { - ref, ok := refsDict[rs.Src()] - if !ok { - return nil - } - - return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) - } - - for _, ref := range localRefs { - err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req) - if err != nil { - return err - } - } - - return nil -} - -func (r *Remote) deleteReferences(rs config.RefSpec, - remoteRefs storer.ReferenceStorer, - refsDict map[string]*plumbing.Reference, - req *packp.ReferenceUpdateRequest, - prune bool) error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } - - return iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - if prune { - rs := rs.Reverse() - if !rs.Match(ref.Name()) { - return nil - } - - if _, ok := refsDict[rs.Dst(ref.Name()).String()]; ok { - return nil - } - } else if rs.Dst("") != ref.Name() { - return nil - } - - cmd := &packp.Command{ - Name: ref.Name(), - Old: ref.Hash(), - New: plumbing.ZeroHash, - } - req.Commands = append(req.Commands, cmd) - return nil - }) -} - -func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, - remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, - req *packp.ReferenceUpdateRequest) error { - - if localRef.Type() != plumbing.HashReference { - return nil - } - - if !rs.Match(localRef.Name()) { - return nil - } - - cmd := &packp.Command{ - Name: rs.Dst(localRef.Name()), - Old: plumbing.ZeroHash, - New: localRef.Hash(), - } - - remoteRef, err := remoteRefs.Reference(cmd.Name) - if err == nil { - if remoteRef.Type() != plumbing.HashReference { - //TODO: check actual git behavior here - return nil - } - - cmd.Old = remoteRef.Hash() - } else if err != plumbing.ErrReferenceNotFound { - return err - } - - if cmd.Old == cmd.New { - return nil - } - - if !rs.IsForceUpdate() { - if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil { - return err - } - } - - req.Commands = append(req.Commands, cmd) - return nil -} - -func (r *Remote) references() ([]*plumbing.Reference, error) { - var localRefs []*plumbing.Reference - - iter, err := r.s.IterReferences() - if err != nil { - return nil, err - } - - for { - ref, err := 
iter.Next() - if err == io.EOF { - break - } - - if err != nil { - return nil, err - } - - localRefs = append(localRefs, ref) - } - - return localRefs, nil -} - -func getRemoteRefsFromStorer(remoteRefStorer storer.ReferenceStorer) ( - map[plumbing.Hash]bool, error) { - remoteRefs := map[plumbing.Hash]bool{} - iter, err := remoteRefStorer.IterReferences() - if err != nil { - return nil, err - } - err = iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - remoteRefs[ref.Hash()] = true - return nil - }) - if err != nil { - return nil, err - } - return remoteRefs, nil -} - -// getHavesFromRef populates the given `haves` map with the given -// reference, and up to `maxHavesToVisitPerRef` ancestor commits. -func getHavesFromRef( - ref *plumbing.Reference, - remoteRefs map[plumbing.Hash]bool, - s storage.Storer, - haves map[plumbing.Hash]bool, -) error { - h := ref.Hash() - if haves[h] { - return nil - } - - // No need to load the commit if we know the remote already - // has this hash. - if remoteRefs[h] { - haves[h] = true - return nil - } - - commit, err := object.GetCommit(s, h) - if err != nil { - // Ignore the error if this isn't a commit. - haves[ref.Hash()] = true - return nil - } - - // Until go-git supports proper commit negotiation during an - // upload pack request, include up to `maxHavesToVisitPerRef` - // commits from the history of each ref. - walker := object.NewCommitPreorderIter(commit, haves, nil) - toVisit := maxHavesToVisitPerRef - return walker.ForEach(func(c *object.Commit) error { - haves[c.Hash] = true - toVisit-- - // If toVisit starts out at 0 (indicating there is no - // max), then it will be negative here and we won't stop - // early. - if toVisit == 0 || remoteRefs[c.Hash] { - return storer.ErrStop - } - return nil - }) -} - -func getHaves( - localRefs []*plumbing.Reference, - remoteRefStorer storer.ReferenceStorer, - s storage.Storer, -) ([]plumbing.Hash, error) { - haves := map[plumbing.Hash]bool{} - - // Build a map of all the remote references, to avoid loading too - // many parent commits for references we know don't need to be - // transferred. 
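[editor's note] The capped ancestor walk in getHavesFromRef above is reproducible with go-git's public object API. A minimal sketch, assuming a repository `r` and an illustrative limit; `walkAncestors` is a hypothetical helper, not part of go-git:

```go
package sketch

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/object"
	"github.com/go-git/go-git/v5/plumbing/storer"
)

// walkAncestors marks up to `limit` ancestors of the commit at h as seen,
// mirroring the bounded traversal getHavesFromRef performs above.
func walkAncestors(r *git.Repository, h plumbing.Hash, limit int) (map[plumbing.Hash]bool, error) {
	seen := map[plumbing.Hash]bool{}
	commit, err := r.CommitObject(h)
	if err != nil {
		return nil, err
	}
	iter := object.NewCommitPreorderIter(commit, seen, nil)
	err = iter.ForEach(func(c *object.Commit) error {
		seen[c.Hash] = true
		limit--
		if limit == 0 {
			return storer.ErrStop // stop early, like maxHavesToVisitPerRef
		}
		return nil
	})
	return seen, err
}
```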
- remoteRefs, err := getRemoteRefsFromStorer(remoteRefStorer) - if err != nil { - return nil, err - } - - for _, ref := range localRefs { - if haves[ref.Hash()] { - continue - } - - if ref.Type() != plumbing.HashReference { - continue - } - - err = getHavesFromRef(ref, remoteRefs, s, haves) - if err != nil { - return nil, err - } - } - - var result []plumbing.Hash - for h := range haves { - result = append(result, h) - } - - return result, nil -} - -const refspecAllTags = "+refs/tags/*:refs/tags/*" - -func calculateRefs( - spec []config.RefSpec, - remoteRefs storer.ReferenceStorer, - tagMode TagMode, -) (memory.ReferenceStorage, error) { - if tagMode == AllTags { - spec = append(spec, refspecAllTags) - } - - refs := make(memory.ReferenceStorage) - for _, s := range spec { - if err := doCalculateRefs(s, remoteRefs, refs); err != nil { - return nil, err - } - } - - return refs, nil -} - -func doCalculateRefs( - s config.RefSpec, - remoteRefs storer.ReferenceStorer, - refs memory.ReferenceStorage, -) error { - iter, err := remoteRefs.IterReferences() - if err != nil { - return err - } - - if s.IsExactSHA1() { - ref := plumbing.NewHashReference(s.Dst(""), plumbing.NewHash(s.Src())) - return refs.SetReference(ref) - } - - var matched bool - err = iter.ForEach(func(ref *plumbing.Reference) error { - if !s.Match(ref.Name()) { - return nil - } - - if ref.Type() == plumbing.SymbolicReference { - target, err := storer.ResolveReference(remoteRefs, ref.Name()) - if err != nil { - return err - } - - ref = plumbing.NewHashReference(ref.Name(), target.Hash()) - } - - if ref.Type() != plumbing.HashReference { - return nil - } - - matched = true - if err := refs.SetReference(ref); err != nil { - return err - } - - if !s.IsWildcard() { - return storer.ErrStop - } - - return nil - }) - - if !matched && !s.IsWildcard() { - return NoMatchingRefSpecError{refSpec: s} - } - - return err -} - -func getWants(localStorer storage.Storer, refs memory.ReferenceStorage) ([]plumbing.Hash, error) { - shallow := false - if s, _ := localStorer.Shallow(); len(s) > 0 { - shallow = true - } - - wants := map[plumbing.Hash]bool{} - for _, ref := range refs { - hash := ref.Hash() - exists, err := objectExists(localStorer, ref.Hash()) - if err != nil { - return nil, err - } - - if !exists || shallow { - wants[hash] = true - } - } - - var result []plumbing.Hash - for h := range wants { - result = append(result, h) - } - - return result, nil -} - -func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { - _, err := s.EncodedObject(plumbing.AnyObject, h) - if err == plumbing.ErrObjectNotFound { - return false, nil - } - - return true, err -} - -func checkFastForwardUpdate(s storer.EncodedObjectStorer, remoteRefs storer.ReferenceStorer, cmd *packp.Command) error { - if cmd.Old == plumbing.ZeroHash { - _, err := remoteRefs.Reference(cmd.Name) - if err == plumbing.ErrReferenceNotFound { - return nil - } - - if err != nil { - return err - } - - return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) - } - - ff, err := isFastForward(s, cmd.Old, cmd.New) - if err != nil { - return err - } - - if !ff { - return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String()) - } - - return nil -} - -func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash) (bool, error) { - c, err := object.GetCommit(s, new) - if err != nil { - return false, err - } - - found := false - iter := object.NewCommitPreorderIter(c, nil, nil) - err = iter.ForEach(func(c *object.Commit) error { - if c.Hash != old { - 
return nil - } - - found = true - return storer.ErrStop - }) - return found, err -} - -func (r *Remote) newUploadPackRequest(o *FetchOptions, - ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { - - req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) - - if o.Depth != 0 { - req.Depth = packp.DepthCommits(o.Depth) - if err := req.Capabilities.Set(capability.Shallow); err != nil { - return nil, err - } - } - - if o.Progress == nil && ar.Capabilities.Supports(capability.NoProgress) { - if err := req.Capabilities.Set(capability.NoProgress); err != nil { - return nil, err - } - } - - isWildcard := true - for _, s := range o.RefSpecs { - if !s.IsWildcard() { - isWildcard = false - break - } - } - - if isWildcard && o.Tags == TagFollowing && ar.Capabilities.Supports(capability.IncludeTag) { - if err := req.Capabilities.Set(capability.IncludeTag); err != nil { - return nil, err - } - } - - return req, nil -} - -func (r *Remote) isSupportedRefSpec(refs []config.RefSpec, ar *packp.AdvRefs) error { - var containsIsExact bool - for _, ref := range refs { - if ref.IsExactSHA1() { - containsIsExact = true - } - } - - if !containsIsExact { - return nil - } - - if ar.Capabilities.Supports(capability.AllowReachableSHA1InWant) || - ar.Capabilities.Supports(capability.AllowTipSHA1InWant) { - return nil - } - - return ErrExactSHA1NotSupported -} - -func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { - var t sideband.Type - - switch { - case l.Supports(capability.Sideband): - t = sideband.Sideband - case l.Supports(capability.Sideband64k): - t = sideband.Sideband64k - default: - return reader - } - - d := sideband.NewDemuxer(t, reader) - d.Progress = p - - return d -} - -func (r *Remote) updateLocalReferenceStorage( - specs []config.RefSpec, - fetchedRefs, remoteRefs memory.ReferenceStorage, - tagMode TagMode, - force bool, -) (updated bool, err error) { - isWildcard := true - forceNeeded := false - - for _, spec := range specs { - if !spec.IsWildcard() { - isWildcard = false - } - - for _, ref := range fetchedRefs { - if !spec.Match(ref.Name()) && !spec.IsExactSHA1() { - continue - } - - if ref.Type() != plumbing.HashReference { - continue - } - - localName := spec.Dst(ref.Name()) - old, _ := storer.ResolveReference(r.s, localName) - new := plumbing.NewHashReference(localName, ref.Hash()) - - // If the ref exists locally as a branch and force is not specified, - // only update if the new ref is an ancestor of the old - if old != nil && old.Name().IsBranch() && !force && !spec.IsForceUpdate() { - ff, err := isFastForward(r.s, old.Hash(), new.Hash()) - if err != nil { - return updated, err - } - - if !ff { - forceNeeded = true - continue - } - } - - refUpdated, err := checkAndUpdateReferenceStorerIfNeeded(r.s, new, old) - if err != nil { - return updated, err - } - - if refUpdated { - updated = true - } - } - } - - if tagMode == NoTags { - return updated, nil - } - - tags := fetchedRefs - if isWildcard { - tags = remoteRefs - } - tagUpdated, err := r.buildFetchedTags(tags) - if err != nil { - return updated, err - } - - if tagUpdated { - updated = true - } - - if forceNeeded { - err = ErrForceNeeded - } - - return -} - -func (r *Remote) buildFetchedTags(refs memory.ReferenceStorage) (updated bool, err error) { - for _, ref := range refs { - if !ref.Name().IsTag() { - continue - } - - _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) - if err == plumbing.ErrObjectNotFound { - continue - } - - if err != nil { - return false, err - } - - 
refUpdated, err := updateReferenceStorerIfNeeded(r.s, ref) - if err != nil { - return updated, err - } - - if refUpdated { - updated = true - } - } - - return -} - -// List the references on the remote repository. -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects to the -// transport operations. -func (r *Remote) ListContext(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { - refs, err := r.list(ctx, o) - if err != nil { - return refs, err - } - return refs, nil -} - -func (r *Remote) List(o *ListOptions) (rfs []*plumbing.Reference, err error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - return r.ListContext(ctx, o) -} - -func (r *Remote) list(ctx context.Context, o *ListOptions) (rfs []*plumbing.Reference, err error) { - s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(s, &err) - - ar, err := s.AdvertisedReferencesContext(ctx) - if err != nil { - return nil, err - } - - allRefs, err := ar.AllReferences() - if err != nil { - return nil, err - } - - refs, err := allRefs.IterReferences() - if err != nil { - return nil, err - } - - var resultRefs []*plumbing.Reference - err = refs.ForEach(func(ref *plumbing.Reference) error { - resultRefs = append(resultRefs, ref) - return nil - }) - if err != nil { - return nil, err - } - return resultRefs, nil -} - -func objectsToPush(commands []*packp.Command) []plumbing.Hash { - objects := make([]plumbing.Hash, 0, len(commands)) - for _, cmd := range commands { - if cmd.New == plumbing.ZeroHash { - continue - } - objects = append(objects, cmd.New) - } - return objects -} - -func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) { - iter, err := refs.IterReferences() - if err != nil { - return nil, err - } - - var hs []plumbing.Hash - err = iter.ForEach(func(ref *plumbing.Reference) error { - if ref.Type() != plumbing.HashReference { - return nil - } - - hs = append(hs, ref.Hash()) - return nil - }) - if err != nil { - return nil, err - } - - return hs, nil -} - -func pushHashes( - ctx context.Context, - sess transport.ReceivePackSession, - s storage.Storer, - req *packp.ReferenceUpdateRequest, - hs []plumbing.Hash, - useRefDeltas bool, - allDelete bool, -) (*packp.ReportStatus, error) { - - rd, wr := ioutil.Pipe() - - config, err := s.Config() - if err != nil { - return nil, err - } - - // Set buffer size to 1 so the error message can be written when - // ReceivePack fails. Otherwise the goroutine will be blocked writing - // to the channel. 
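[editor's note] The "buffer size to 1" comment above is the key to why pushHashes cannot leak its encoder goroutine. A generic sketch of the pattern (names are illustrative, not go-git API):

```go
package sketch

// run shows why the done channel is buffered: with capacity 1 the
// goroutine's send always succeeds, so it terminates even if the caller
// has already returned after a ReceivePack error and never reads it.
func run(work func() error) error {
	done := make(chan error, 1)
	go func() {
		done <- work() // never blocks: buffer of 1
	}()
	// The caller may bail out before reading; the goroutine still exits.
	return <-done
}
```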
- done := make(chan error, 1) - - if !allDelete { - req.Packfile = rd - go func() { - e := packfile.NewEncoder(wr, s, useRefDeltas) - if _, err := e.Encode(hs, config.Pack.Window); err != nil { - done <- wr.CloseWithError(err) - return - } - - done <- wr.Close() - }() - } else { - close(done) - } - - rs, err := sess.ReceivePack(ctx, req) - if err != nil { - // close the pipe to unlock encode write - _ = rd.Close() - return nil, err - } - - if err := <-done; err != nil { - return nil, err - } - - return rs, nil -} - -func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error { - if o.Depth == 0 || len(resp.Shallows) == 0 { - return nil - } - - shallows, err := r.s.Shallow() - if err != nil { - return err - } - -outer: - for _, s := range resp.Shallows { - for _, oldS := range shallows { - if s == oldS { - continue outer - } - } - shallows = append(shallows, s) - } - - return r.s.SetShallow(shallows) -} - -func (r *Remote) checkRequireRemoteRefs(requires []config.RefSpec, remoteRefs storer.ReferenceStorer) error { - for _, require := range requires { - if require.IsWildcard() { - return fmt.Errorf("wildcards not supported in RequireRemoteRefs, got %s", require.String()) - } - - name := require.Dst("") - remote, err := remoteRefs.Reference(name) - if err != nil { - return fmt.Errorf("remote ref %s required to be %s but is absent", name.String(), require.Src()) - } - - var requireHash string - if require.IsExactSHA1() { - requireHash = require.Src() - } else { - target, err := storer.ResolveReference(remoteRefs, plumbing.ReferenceName(require.Src())) - if err != nil { - return fmt.Errorf("could not resolve ref %s in RequireRemoteRefs", require.Src()) - } - requireHash = target.Hash().String() - } - - if remote.Hash().String() != requireHash { - return fmt.Errorf("remote ref %s required to be %s but is %s", name.String(), requireHash, remote.Hash().String()) - } - } - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/repository.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/repository.go deleted file mode 100644 index d3fbf97593e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/repository.go +++ /dev/null @@ -1,1722 +0,0 @@ -package git - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - stdioutil "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-billy/v5/util" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/internal/revision" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/imdario/mergo" -) - -// GitDirName this is a special folder where all the git stuff is. 
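[editor's note] The repository.go hunk below removes Init, which treats a nil worktree as a request for a bare repository. A minimal in-memory sketch using go-git's public constructors, for readers tracing what this vendored copy provided:

```go
package sketch

import (
	"github.com/go-git/go-billy/v5/memfs"
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/storage/memory"
)

func initRepos() error {
	// Non-bare: a storer plus a worktree filesystem.
	if _, err := git.Init(memory.NewStorage(), memfs.New()); err != nil {
		return err
	}
	// Bare: a nil worktree yields a repository without a checkout.
	_, err := git.Init(memory.NewStorage(), nil)
	return err
}
```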
-const GitDirName = ".git" - -var ( - // ErrBranchExists an error stating the specified branch already exists - ErrBranchExists = errors.New("branch already exists") - // ErrBranchNotFound an error stating the specified branch does not exist - ErrBranchNotFound = errors.New("branch not found") - // ErrTagExists an error stating the specified tag already exists - ErrTagExists = errors.New("tag already exists") - // ErrTagNotFound an error stating the specified tag does not exist - ErrTagNotFound = errors.New("tag not found") - // ErrFetching is returned when the packfile could not be downloaded - ErrFetching = errors.New("unable to fetch packfile") - - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("Packed objects not supported") -) - -// Repository represents a git repository -type Repository struct { - Storer storage.Storer - - r map[string]*Remote - wt billy.Filesystem -} - -// Init creates an empty git repository, based on the given Storer and worktree. -// The worktree Filesystem is optional, if nil a bare repository is created. If -// the given storer is not empty ErrRepositoryAlreadyExists is returned -func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { - if err := initStorer(s); err != nil { - return nil, err - } - - r := newRepository(s, worktree) - _, err := r.Reference(plumbing.HEAD, false) - switch err { - case plumbing.ErrReferenceNotFound: - case nil: - return nil, ErrRepositoryAlreadyExists - default: - return nil, err - } - - h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) - if err := s.SetReference(h); err != nil { - return nil, err - } - - if worktree == nil { - _ = r.setIsBare(true) - return r, nil - } - - return r, setWorktreeAndStoragePaths(r, worktree) -} - -func initStorer(s storer.Storer) error { - i, ok := s.(storer.Initializer) - if !ok { - return nil - } - - return i.Init() -} - -func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { - type fsBased interface { - Filesystem() billy.Filesystem - } - - // .git file is only created if the storage is file based and the file - // system is osfs.OS - fs, isFSBased := r.Storer.(fsBased) - if !isFSBased { - return nil - } - - if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { - return err - } - - return setConfigWorktree(r, worktree, fs.Filesystem()) -} - -func createDotGitFile(worktree, storage billy.Filesystem) error { - path, err := filepath.Rel(worktree.Root(), storage.Root()) - if err != nil { - path = storage.Root() - } - - if path == GitDirName { - // not needed, since the folder is the default place - return nil - } - - f, err := worktree.Create(GitDirName) - if err != nil { - return err - } - - defer f.Close() - _, err = fmt.Fprintf(f, "gitdir: %s\n", path) - return err -} - -func setConfigWorktree(r *Repository, worktree, 
storage billy.Filesystem) error { - path, err := filepath.Rel(storage.Root(), worktree.Root()) - if err != nil { - path = worktree.Root() - } - - if path == ".." { - // not needed, since the folder is the default place - return nil - } - - cfg, err := r.Config() - if err != nil { - return err - } - - cfg.Core.Worktree = path - return r.Storer.SetConfig(cfg) -} - -// Open opens a git repository using the given Storer and worktree filesystem, -// if the given storer is complete empty ErrRepositoryNotExists is returned. -// The worktree can be nil when the repository being opened is bare, if the -// repository is a normal one (not bare) and worktree is nil the err -// ErrWorktreeNotProvided is returned -func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { - _, err := s.Reference(plumbing.HEAD) - if err == plumbing.ErrReferenceNotFound { - return nil, ErrRepositoryNotExists - } - - if err != nil { - return nil, err - } - - return newRepository(s, worktree), nil -} - -// Clone a repository into the given Storer and worktree Filesystem with the -// given options, if worktree is nil a bare repository is created. If the given -// storer is not empty ErrRepositoryAlreadyExists is returned. -func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { - return CloneContext(context.Background(), s, worktree, o) -} - -// CloneContext a repository into the given Storer and worktree Filesystem with -// the given options, if worktree is nil a bare repository is created. If the -// given storer is not empty ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func CloneContext( - ctx context.Context, s storage.Storer, worktree billy.Filesystem, o *CloneOptions, -) (*Repository, error) { - r, err := Init(s, worktree) - if err != nil { - return nil, err - } - - return r, r.clone(ctx, o) -} - -// PlainInit create an empty git repository at the given path. isBare defines -// if the repository will have worktree (non-bare) or not (bare), if the path -// is not empty ErrRepositoryAlreadyExists is returned. -func PlainInit(path string, isBare bool) (*Repository, error) { - var wt, dot billy.Filesystem - - if isBare { - dot = osfs.New(path) - } else { - wt = osfs.New(path) - dot, _ = wt.Chroot(GitDirName) - } - - s := filesystem.NewStorage(dot, cache.NewObjectLRUDefault()) - - return Init(s, wt) -} - -// PlainOpen opens a git repository from the given path. It detects if the -// repository is bare or a normal one. If the path doesn't contain a valid -// repository ErrRepositoryNotExists is returned -func PlainOpen(path string) (*Repository, error) { - return PlainOpenWithOptions(path, &PlainOpenOptions{}) -} - -// PlainOpenWithOptions opens a git repository from the given path with specific -// options. See PlainOpen for more info. 
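[editor's note] PlainOpenWithOptions, deleted just below, is the programmatic equivalent of git's upward `.git` discovery. A minimal sketch:

```go
package sketch

import git "github.com/go-git/go-git/v5"

// openFromSubdir opens the enclosing repository from any nested directory.
func openFromSubdir(path string) (*git.Repository, error) {
	return git.PlainOpenWithOptions(path, &git.PlainOpenOptions{
		DetectDotGit: true, // walk parent directories until .git is found
	})
}
```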
-func PlainOpenWithOptions(path string, o *PlainOpenOptions) (*Repository, error) { - dot, wt, err := dotGitToOSFilesystems(path, o.DetectDotGit) - if err != nil { - return nil, err - } - - if _, err := dot.Stat(""); err != nil { - if os.IsNotExist(err) { - return nil, ErrRepositoryNotExists - } - - return nil, err - } - - var repositoryFs billy.Filesystem - - if o.EnableDotGitCommonDir { - dotGitCommon, err := dotGitCommonDirectory(dot) - if err != nil { - return nil, err - } - repositoryFs = dotgit.NewRepositoryFilesystem(dot, dotGitCommon) - } else { - repositoryFs = dot - } - - s := filesystem.NewStorage(repositoryFs, cache.NewObjectLRUDefault()) - - return Open(s, wt) -} - -func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem, err error) { - if path, err = filepath.Abs(path); err != nil { - return nil, nil, err - } - - var fs billy.Filesystem - var fi os.FileInfo - for { - fs = osfs.New(path) - - pathinfo, err := fs.Stat("/") - if !os.IsNotExist(err) { - if !pathinfo.IsDir() && detect { - fs = osfs.New(filepath.Dir(path)) - } - } - - fi, err = fs.Stat(GitDirName) - if err == nil { - // no error; stop - break - } - if !os.IsNotExist(err) { - // unknown error; stop - return nil, nil, err - } - if detect { - // try its parent as long as we haven't reached - // the root dir - if dir := filepath.Dir(path); dir != path { - path = dir - continue - } - } - // not detecting via parent dirs and the dir does not exist; - // stop - return fs, nil, nil - } - - if fi.IsDir() { - dot, err = fs.Chroot(GitDirName) - return dot, fs, err - } - - dot, err = dotGitFileToOSFilesystem(path, fs) - if err != nil { - return nil, nil, err - } - - return dot, fs, nil -} - -func dotGitFileToOSFilesystem(path string, fs billy.Filesystem) (bfs billy.Filesystem, err error) { - f, err := fs.Open(GitDirName) - if err != nil { - return nil, err - } - defer ioutil.CheckClose(f, &err) - - b, err := stdioutil.ReadAll(f) - if err != nil { - return nil, err - } - - line := string(b) - const prefix = "gitdir: " - if !strings.HasPrefix(line, prefix) { - return nil, fmt.Errorf(".git file has no %s prefix", prefix) - } - - gitdir := strings.Split(line[len(prefix):], "\n")[0] - gitdir = strings.TrimSpace(gitdir) - if filepath.IsAbs(gitdir) { - return osfs.New(gitdir), nil - } - - return osfs.New(fs.Join(path, gitdir)), nil -} - -func dotGitCommonDirectory(fs billy.Filesystem) (commonDir billy.Filesystem, err error) { - f, err := fs.Open("commondir") - if os.IsNotExist(err) { - return nil, nil - } - if err != nil { - return nil, err - } - - b, err := stdioutil.ReadAll(f) - if err != nil { - return nil, err - } - if len(b) > 0 { - path := strings.TrimSpace(string(b)) - if filepath.IsAbs(path) { - commonDir = osfs.New(path) - } else { - commonDir = osfs.New(filepath.Join(fs.Root(), path)) - } - if _, err := commonDir.Stat(""); err != nil { - if os.IsNotExist(err) { - return nil, ErrRepositoryIncomplete - } - - return nil, err - } - } - - return commonDir, nil -} - -// PlainClone a repository into the path with the given options, isBare defines -// if the new repository will be bare or normal. If the path is not empty -// ErrRepositoryAlreadyExists is returned. -// -// TODO(mcuadros): move isBare to CloneOptions in v5 -func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { - return PlainCloneContext(context.Background(), path, isBare, o) -} - -// PlainCloneContext a repository into the path with the given options, isBare -// defines if the new repository will be bare or normal. 
If the path is not empty -// ErrRepositoryAlreadyExists is returned. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -// -// TODO(mcuadros): move isBare to CloneOptions in v5 -// TODO(smola): refuse upfront to clone on a non-empty directory in v5, see #1027 -func PlainCloneContext(ctx context.Context, path string, isBare bool, o *CloneOptions) (*Repository, error) { - cleanup, cleanupParent, err := checkIfCleanupIsNeeded(path) - if err != nil { - return nil, err - } - - r, err := PlainInit(path, isBare) - if err != nil { - return nil, err - } - - err = r.clone(ctx, o) - if err != nil && err != ErrRepositoryAlreadyExists { - if cleanup { - _ = cleanUpDir(path, cleanupParent) - } - } - - return r, err -} - -func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { - return &Repository{ - Storer: s, - wt: worktree, - r: make(map[string]*Remote), - } -} - -func checkIfCleanupIsNeeded(path string) (cleanup bool, cleanParent bool, err error) { - fi, err := osfs.Default.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return true, true, nil - } - - return false, false, err - } - - if !fi.IsDir() { - return false, false, fmt.Errorf("path is not a directory: %s", path) - } - - files, err := osfs.Default.ReadDir(path) - if err != nil { - return false, false, err - } - - if len(files) == 0 { - return true, false, nil - } - - return false, false, nil -} - -func cleanUpDir(path string, all bool) error { - if all { - return util.RemoveAll(osfs.Default, path) - } - - files, err := osfs.Default.ReadDir(path) - if err != nil { - return err - } - - for _, fi := range files { - if err := util.RemoveAll(osfs.Default, osfs.Default.Join(path, fi.Name())); err != nil { - return err - } - } - - return err -} - -// Config return the repository config. In a filesystem backed repository this -// means read the `.git/config`. -func (r *Repository) Config() (*config.Config, error) { - return r.Storer.Config() -} - -// SetConfig marshall and writes the repository config. In a filesystem backed -// repository this means write the `.git/config`. This function should be called -// with the result of `Repository.Config` and never with the output of -// `Repository.ConfigScoped`. -func (r *Repository) SetConfig(cfg *config.Config) error { - return r.Storer.SetConfig(cfg) -} - -// ConfigScoped returns the repository config, merged with requested scope and -// lower. For example if, config.GlobalScope is given the local and global config -// are returned merged in one config value. 
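[editor's note] ConfigScoped, deleted below, merges the requested scope and everything narrower, so passing the widest scope reproduces the CLI's effective config (local over global over system). A minimal sketch:

```go
package sketch

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

// effectiveIdentity reads user.name/user.email the way the git CLI would.
func effectiveIdentity(r *git.Repository) error {
	cfg, err := r.ConfigScoped(config.SystemScope)
	if err != nil {
		return err
	}
	fmt.Printf("%s <%s>\n", cfg.User.Name, cfg.User.Email)
	return nil
}
```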
-func (r *Repository) ConfigScoped(scope config.Scope) (*config.Config, error) { - // TODO(mcuadros): v6, add this as ConfigOptions.Scoped - - var err error - system := config.NewConfig() - if scope >= config.SystemScope { - system, err = config.LoadConfig(config.SystemScope) - if err != nil { - return nil, err - } - } - - global := config.NewConfig() - if scope >= config.GlobalScope { - global, err = config.LoadConfig(config.GlobalScope) - if err != nil { - return nil, err - } - } - - local, err := r.Storer.Config() - if err != nil { - return nil, err - } - - _ = mergo.Merge(global, system) - _ = mergo.Merge(local, global) - return local, nil -} - -// Remote return a remote if exists -func (r *Repository) Remote(name string) (*Remote, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - c, ok := cfg.Remotes[name] - if !ok { - return nil, ErrRemoteNotFound - } - - return NewRemote(r.Storer, c), nil -} - -// Remotes returns a list with all the remotes -func (r *Repository) Remotes() ([]*Remote, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - remotes := make([]*Remote, len(cfg.Remotes)) - - var i int - for _, c := range cfg.Remotes { - remotes[i] = NewRemote(r.Storer, c) - i++ - } - - return remotes, nil -} - -// CreateRemote creates a new remote -func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) { - if err := c.Validate(); err != nil { - return nil, err - } - - remote := NewRemote(r.Storer, c) - - cfg, err := r.Config() - if err != nil { - return nil, err - } - - if _, ok := cfg.Remotes[c.Name]; ok { - return nil, ErrRemoteExists - } - - cfg.Remotes[c.Name] = c - return remote, r.Storer.SetConfig(cfg) -} - -// CreateRemoteAnonymous creates a new anonymous remote. c.Name must be "anonymous". -// It's used like 'git fetch git@github.com:src-d/go-git.git master:master'. -func (r *Repository) CreateRemoteAnonymous(c *config.RemoteConfig) (*Remote, error) { - if err := c.Validate(); err != nil { - return nil, err - } - - if c.Name != "anonymous" { - return nil, ErrAnonymousRemoteName - } - - remote := NewRemote(r.Storer, c) - - return remote, nil -} - -// DeleteRemote delete a remote from the repository and delete the config -func (r *Repository) DeleteRemote(name string) error { - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Remotes[name]; !ok { - return ErrRemoteNotFound - } - - delete(cfg.Remotes, name) - return r.Storer.SetConfig(cfg) -} - -// Branch return a Branch if exists -func (r *Repository) Branch(name string) (*config.Branch, error) { - cfg, err := r.Config() - if err != nil { - return nil, err - } - - b, ok := cfg.Branches[name] - if !ok { - return nil, ErrBranchNotFound - } - - return b, nil -} - -// CreateBranch creates a new Branch -func (r *Repository) CreateBranch(c *config.Branch) error { - if err := c.Validate(); err != nil { - return err - } - - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Branches[c.Name]; ok { - return ErrBranchExists - } - - cfg.Branches[c.Name] = c - return r.Storer.SetConfig(cfg) -} - -// DeleteBranch delete a Branch from the repository and delete the config -func (r *Repository) DeleteBranch(name string) error { - cfg, err := r.Config() - if err != nil { - return err - } - - if _, ok := cfg.Branches[name]; !ok { - return ErrBranchNotFound - } - - delete(cfg.Branches, name) - return r.Storer.SetConfig(cfg) -} - -// CreateTag creates a tag. 
If opts is included, the tag is an annotated tag, -// otherwise a lightweight tag is created. -func (r *Repository) CreateTag(name string, hash plumbing.Hash, opts *CreateTagOptions) (*plumbing.Reference, error) { - rname := plumbing.ReferenceName(path.Join("refs", "tags", name)) - - _, err := r.Storer.Reference(rname) - switch err { - case nil: - // Tag exists, this is an error - return nil, ErrTagExists - case plumbing.ErrReferenceNotFound: - // Tag missing, available for creation, pass this - default: - // Some other error - return nil, err - } - - var target plumbing.Hash - if opts != nil { - target, err = r.createTagObject(name, hash, opts) - if err != nil { - return nil, err - } - } else { - target = hash - } - - ref := plumbing.NewHashReference(rname, target) - if err = r.Storer.SetReference(ref); err != nil { - return nil, err - } - - return ref, nil -} - -func (r *Repository) createTagObject(name string, hash plumbing.Hash, opts *CreateTagOptions) (plumbing.Hash, error) { - if err := opts.Validate(r, hash); err != nil { - return plumbing.ZeroHash, err - } - - rawobj, err := object.GetObject(r.Storer, hash) - if err != nil { - return plumbing.ZeroHash, err - } - - tag := &object.Tag{ - Name: name, - Tagger: *opts.Tagger, - Message: opts.Message, - TargetType: rawobj.Type(), - Target: hash, - } - - if opts.SignKey != nil { - sig, err := r.buildTagSignature(tag, opts.SignKey) - if err != nil { - return plumbing.ZeroHash, err - } - - tag.PGPSignature = sig - } - - obj := r.Storer.NewEncodedObject() - if err := tag.Encode(obj); err != nil { - return plumbing.ZeroHash, err - } - - return r.Storer.SetEncodedObject(obj) -} - -func (r *Repository) buildTagSignature(tag *object.Tag, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := tag.Encode(encoded); err != nil { - return "", err - } - - rdr, err := encoded.Reader() - if err != nil { - return "", err - } - - var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, rdr, nil); err != nil { - return "", err - } - - return b.String(), nil -} - -// Tag returns a tag from the repository. -// -// If you want to check to see if the tag is an annotated tag, you can call -// TagObject on the hash of the reference in ForEach: -// -// ref, err := r.Tag("v0.1.0") -// if err != nil { -// // Handle error -// } -// -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// } -// -func (r *Repository) Tag(name string) (*plumbing.Reference, error) { - ref, err := r.Reference(plumbing.ReferenceName(path.Join("refs", "tags", name)), false) - if err != nil { - if err == plumbing.ErrReferenceNotFound { - // Return a friendly error for this one, versus just ReferenceNotFound. - return nil, ErrTagNotFound - } - - return nil, err - } - - return ref, nil -} - -// DeleteTag deletes a tag from the repository. 
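[editor's note] The CreateTag behavior deleted above (nil options means lightweight, non-nil means annotated) in a minimal sketch; the tag names and signature values are illustrative:

```go
package sketch

import (
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func tagHead(r *git.Repository) error {
	head, err := r.Head()
	if err != nil {
		return err
	}
	// nil options -> lightweight tag (just a ref to the commit).
	if _, err := r.CreateTag("v0.1.0-light", head.Hash(), nil); err != nil {
		return err
	}
	// Non-nil options -> annotated tag object with tagger and message.
	_, err = r.CreateTag("v0.1.0", head.Hash(), &git.CreateTagOptions{
		Tagger:  &object.Signature{Name: "Jane Doe", Email: "jane@example.com", When: time.Now()},
		Message: "first release",
	})
	return err
}
```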
-func (r *Repository) DeleteTag(name string) error { - _, err := r.Tag(name) - if err != nil { - return err - } - - return r.Storer.RemoveReference(plumbing.ReferenceName(path.Join("refs", "tags", name))) -} - -func (r *Repository) resolveToCommitHash(h plumbing.Hash) (plumbing.Hash, error) { - obj, err := r.Storer.EncodedObject(plumbing.AnyObject, h) - if err != nil { - return plumbing.ZeroHash, err - } - switch obj.Type() { - case plumbing.TagObject: - t, err := object.DecodeTag(r.Storer, obj) - if err != nil { - return plumbing.ZeroHash, err - } - return r.resolveToCommitHash(t.Target) - case plumbing.CommitObject: - return h, nil - default: - return plumbing.ZeroHash, ErrUnableToResolveCommit - } -} - -// Clone clones a remote repository -func (r *Repository) clone(ctx context.Context, o *CloneOptions) error { - if err := o.Validate(); err != nil { - return err - } - - c := &config.RemoteConfig{ - Name: o.RemoteName, - URLs: []string{o.URL}, - Fetch: r.cloneRefSpec(o), - } - - if _, err := r.CreateRemote(c); err != nil { - return err - } - - ref, err := r.fetchAndUpdateReferences(ctx, &FetchOptions{ - RefSpecs: c.Fetch, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Tags: o.Tags, - RemoteName: o.RemoteName, - InsecureSkipTLS: o.InsecureSkipTLS, - CABundle: o.CABundle, - }, o.ReferenceName) - if err != nil { - return err - } - - if r.wt != nil && !o.NoCheckout { - w, err := r.Worktree() - if err != nil { - return err - } - - head, err := r.Head() - if err != nil { - return err - } - - if err := w.Reset(&ResetOptions{ - Mode: MergeReset, - Commit: head.Hash(), - }); err != nil { - return err - } - - if o.RecurseSubmodules != NoRecurseSubmodules { - if err := w.updateSubmodules(&SubmoduleUpdateOptions{ - RecurseSubmodules: o.RecurseSubmodules, - Auth: o.Auth, - }); err != nil { - return err - } - } - } - - if err := r.updateRemoteConfigIfNeeded(o, c, ref); err != nil { - return err - } - - if ref.Name().IsBranch() { - branchRef := ref.Name() - branchName := strings.Split(string(branchRef), "refs/heads/")[1] - - b := &config.Branch{ - Name: branchName, - Merge: branchRef, - } - - if o.RemoteName == "" { - b.Remote = "origin" - } else { - b.Remote = o.RemoteName - } - - if err := r.CreateBranch(b); err != nil { - return err - } - } - - return nil -} - -const ( - refspecTag = "+refs/tags/%s:refs/tags/%[1]s" - refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s" - refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD" -) - -func (r *Repository) cloneRefSpec(o *CloneOptions) []config.RefSpec { - switch { - case o.ReferenceName.IsTag(): - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecTag, o.ReferenceName.Short())), - } - case o.SingleBranch && o.ReferenceName == plumbing.HEAD: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecSingleBranchHEAD, o.RemoteName)), - config.RefSpec(fmt.Sprintf(refspecSingleBranch, plumbing.Master.Short(), o.RemoteName)), - } - case o.SingleBranch: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(refspecSingleBranch, o.ReferenceName.Short(), o.RemoteName)), - } - default: - return []config.RefSpec{ - config.RefSpec(fmt.Sprintf(config.DefaultFetchRefSpec, o.RemoteName)), - } - } -} - -func (r *Repository) setIsBare(isBare bool) error { - cfg, err := r.Config() - if err != nil { - return err - } - - cfg.Core.IsBare = isBare - return r.Storer.SetConfig(cfg) -} - -func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error { - if !o.SingleBranch { - return 
nil - } - - c.Fetch = r.cloneRefSpec(o) - - cfg, err := r.Config() - if err != nil { - return err - } - - cfg.Remotes[c.Name] = c - return r.Storer.SetConfig(cfg) -} - -func (r *Repository) fetchAndUpdateReferences( - ctx context.Context, o *FetchOptions, ref plumbing.ReferenceName, -) (*plumbing.Reference, error) { - - if err := o.Validate(); err != nil { - return nil, err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return nil, err - } - - objsUpdated := true - remoteRefs, err := remote.fetch(ctx, o) - if err == NoErrAlreadyUpToDate { - objsUpdated = false - } else if err == packfile.ErrEmptyPackfile { - return nil, ErrFetching - } else if err != nil { - return nil, err - } - - resolvedRef, err := storer.ResolveReference(remoteRefs, ref) - if err != nil { - return nil, err - } - - refsUpdated, err := r.updateReferences(remote.c.Fetch, resolvedRef) - if err != nil { - return nil, err - } - - if !objsUpdated && !refsUpdated { - return nil, NoErrAlreadyUpToDate - } - - return resolvedRef, nil -} - -func (r *Repository) updateReferences(spec []config.RefSpec, - resolvedRef *plumbing.Reference) (updated bool, err error) { - - if !resolvedRef.Name().IsBranch() { - // Detached HEAD mode - h, err := r.resolveToCommitHash(resolvedRef.Hash()) - if err != nil { - return false, err - } - head := plumbing.NewHashReference(plumbing.HEAD, h) - return updateReferenceStorerIfNeeded(r.Storer, head) - } - - refs := []*plumbing.Reference{ - // Create local reference for the resolved ref - resolvedRef, - // Create local symbolic HEAD - plumbing.NewSymbolicReference(plumbing.HEAD, resolvedRef.Name()), - } - - refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedRef)...) - - for _, ref := range refs { - u, err := updateReferenceStorerIfNeeded(r.Storer, ref) - if err != nil { - return updated, err - } - - if u { - updated = true - } - } - - return -} - -func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec, - resolvedHead *plumbing.Reference) []*plumbing.Reference { - - var refs []*plumbing.Reference - - // Create resolved HEAD reference with remote prefix if it does not - // exist. This is needed when using single branch and HEAD. - for _, rs := range spec { - name := resolvedHead.Name() - if !rs.Match(name) { - continue - } - - name = rs.Dst(name) - _, err := r.Storer.Reference(name) - if err == plumbing.ErrReferenceNotFound { - refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash())) - } - } - - return refs -} - -func checkAndUpdateReferenceStorerIfNeeded( - s storer.ReferenceStorer, r, old *plumbing.Reference) ( - updated bool, err error) { - p, err := s.Reference(r.Name()) - if err != nil && err != plumbing.ErrReferenceNotFound { - return false, err - } - - // we use the string method to compare references, is the easiest way - if err == plumbing.ErrReferenceNotFound || r.String() != p.String() { - if err := s.CheckAndSetReference(r, old); err != nil { - return false, err - } - - return true, nil - } - - return false, nil -} - -func updateReferenceStorerIfNeeded( - s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) { - return checkAndUpdateReferenceStorerIfNeeded(s, r, nil) -} - -// Fetch fetches references along with the objects necessary to complete -// their histories, from the remote named as FetchOptions.RemoteName. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. 
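[editor's note] As the doc comment below states, Fetch signals an already-current remote with the sentinel NoErrAlreadyUpToDate rather than a real error. A minimal sketch of the idiomatic caller:

```go
package sketch

import (
	"errors"

	git "github.com/go-git/go-git/v5"
)

// fetchOrigin distinguishes "nothing to do" from a genuine failure.
func fetchOrigin(r *git.Repository) error {
	err := r.Fetch(&git.FetchOptions{RemoteName: "origin"})
	if errors.Is(err, git.NoErrAlreadyUpToDate) {
		return nil // up to date; not worth surfacing
	}
	return err
}
```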
-func (r *Repository) Fetch(o *FetchOptions) error { - return r.FetchContext(context.Background(), o) -} - -// FetchContext fetches references along with the objects necessary to complete -// their histories, from the remote named as FetchOptions.RemoteName. -// -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (r *Repository) FetchContext(ctx context.Context, o *FetchOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return err - } - - return remote.FetchContext(ctx, o) -} - -// Push performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date, from the remote named as -// FetchOptions.RemoteName. -func (r *Repository) Push(o *PushOptions) error { - return r.PushContext(context.Background(), o) -} - -// PushContext performs a push to the remote. Returns NoErrAlreadyUpToDate if -// the remote was already up-to-date, from the remote named as -// FetchOptions.RemoteName. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (r *Repository) PushContext(ctx context.Context, o *PushOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := r.Remote(o.RemoteName) - if err != nil { - return err - } - - return remote.PushContext(ctx, o) -} - -// Log returns the commit history from the given LogOptions. -func (r *Repository) Log(o *LogOptions) (object.CommitIter, error) { - fn := commitIterFunc(o.Order) - if fn == nil { - return nil, fmt.Errorf("invalid Order=%v", o.Order) - } - - var ( - it object.CommitIter - err error - ) - if o.All { - it, err = r.logAll(fn) - } else { - it, err = r.log(o.From, fn) - } - - if err != nil { - return nil, err - } - - if o.FileName != nil { - // for `git log --all` also check parent (if the next commit comes from the real parent) - it = r.logWithFile(*o.FileName, it, o.All) - } - if o.PathFilter != nil { - it = r.logWithPathFilter(o.PathFilter, it, o.All) - } - - if o.Since != nil || o.Until != nil { - limitOptions := object.LogLimitOptions{Since: o.Since, Until: o.Until} - it = r.logWithLimit(it, limitOptions) - } - - return it, nil -} - -func (r *Repository) log(from plumbing.Hash, commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { - h := from - if from == plumbing.ZeroHash { - head, err := r.Head() - if err != nil { - return nil, err - } - - h = head.Hash() - } - - commit, err := r.CommitObject(h) - if err != nil { - return nil, err - } - return commitIterFunc(commit), nil -} - -func (r *Repository) logAll(commitIterFunc func(*object.Commit) object.CommitIter) (object.CommitIter, error) { - return object.NewCommitAllIter(r.Storer, commitIterFunc) -} - -func (*Repository) logWithFile(fileName string, commitIter object.CommitIter, checkParent bool) object.CommitIter { - return object.NewCommitPathIterFromIter( - func(path string) bool { - return path == fileName - }, - commitIter, - checkParent, - ) -} - -func (*Repository) logWithPathFilter(pathFilter func(string) bool, commitIter object.CommitIter, checkParent bool) object.CommitIter { - return object.NewCommitPathIterFromIter( - 
pathFilter, - commitIter, - checkParent, - ) -} - -func (*Repository) logWithLimit(commitIter object.CommitIter, limitOptions object.LogLimitOptions) object.CommitIter { - return object.NewCommitLimitIterFromIter(commitIter, limitOptions) -} - -func commitIterFunc(order LogOrder) func(c *object.Commit) object.CommitIter { - switch order { - case LogOrderDefault: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPreorderIter(c, nil, nil) - } - case LogOrderDFS: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPreorderIter(c, nil, nil) - } - case LogOrderDFSPost: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitPostorderIter(c, nil) - } - case LogOrderBSF: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitIterBSF(c, nil, nil) - } - case LogOrderCommitterTime: - return func(c *object.Commit) object.CommitIter { - return object.NewCommitIterCTime(c, nil, nil) - } - } - return nil -} - -// Tags returns all the tag References in a repository. -// -// If you want to check to see if the tag is an annotated tag, you can call -// TagObject on the hash Reference passed in through ForEach: -// -// iter, err := r.Tags() -// if err != nil { -// // Handle error -// } -// -// if err := iter.ForEach(func (ref *plumbing.Reference) error { -// obj, err := r.TagObject(ref.Hash()) -// switch err { -// case nil: -// // Tag object present -// case plumbing.ErrObjectNotFound: -// // Not a tag object -// default: -// // Some other error -// return err -// } -// }); err != nil { -// // Handle outer iterator error -// } -// -func (r *Repository) Tags() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsTag() - }, refIter), nil -} - -// Branches returns all the References that are Branches. -func (r *Repository) Branches() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsBranch() - }, refIter), nil -} - -// Notes returns all the References that are notes. For more information: -// https://git-scm.com/docs/git-notes -func (r *Repository) Notes() (storer.ReferenceIter, error) { - refIter, err := r.Storer.IterReferences() - if err != nil { - return nil, err - } - - return storer.NewReferenceFilteredIter( - func(r *plumbing.Reference) bool { - return r.Name().IsNote() - }, refIter), nil -} - -// TreeObject return a Tree with the given hash. If not found -// plumbing.ErrObjectNotFound is returned -func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) { - return object.GetTree(r.Storer, h) -} - -// TreeObjects returns an unsorted TreeIter with all the trees in the repository -func (r *Repository) TreeObjects() (*object.TreeIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject) - if err != nil { - return nil, err - } - - return object.NewTreeIter(r.Storer, iter), nil -} - -// CommitObject return a Commit with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) { - return object.GetCommit(r.Storer, h) -} - -// CommitObjects returns an unsorted CommitIter with all the commits in the repository. 
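[editor's note] The Log API deleted above defaults to HEAD when LogOptions.From is the zero hash, as its body shows. A minimal sketch of iterating history in committer-time order:

```go
package sketch

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func printHistory(r *git.Repository) error {
	iter, err := r.Log(&git.LogOptions{Order: git.LogOrderCommitterTime})
	if err != nil {
		return err
	}
	return iter.ForEach(func(c *object.Commit) error {
		fmt.Println(c.Hash.String()[:7], c.Author.When.Format("2006-01-02"))
		return nil
	})
}
```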
-func (r *Repository) CommitObjects() (object.CommitIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject) - if err != nil { - return nil, err - } - - return object.NewCommitIter(r.Storer, iter), nil -} - -// BlobObject returns a Blob with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) { - return object.GetBlob(r.Storer, h) -} - -// BlobObjects returns an unsorted BlobIter with all the blobs in the repository. -func (r *Repository) BlobObjects() (*object.BlobIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject) - if err != nil { - return nil, err - } - - return object.NewBlobIter(r.Storer, iter), nil -} - -// TagObject returns a Tag with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. This method only returns -// annotated Tags, no lightweight Tags. -func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) { - return object.GetTag(r.Storer, h) -} - -// TagObjects returns a unsorted TagIter that can step through all of the annotated -// tags in the repository. -func (r *Repository) TagObjects() (*object.TagIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject) - if err != nil { - return nil, err - } - - return object.NewTagIter(r.Storer, iter), nil -} - -// Object returns an Object with the given hash. If not found -// plumbing.ErrObjectNotFound is returned. -func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) { - obj, err := r.Storer.EncodedObject(t, h) - if err != nil { - return nil, err - } - - return object.DecodeObject(r.Storer, obj) -} - -// Objects returns an unsorted ObjectIter with all the objects in the repository. -func (r *Repository) Objects() (*object.ObjectIter, error) { - iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject) - if err != nil { - return nil, err - } - - return object.NewObjectIter(r.Storer, iter), nil -} - -// Head returns the reference where HEAD is pointing to. -func (r *Repository) Head() (*plumbing.Reference, error) { - return storer.ResolveReference(r.Storer, plumbing.HEAD) -} - -// Reference returns the reference for a given reference name. If resolved is -// true, any symbolic reference will be resolved. -func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) ( - *plumbing.Reference, error) { - - if resolved { - return storer.ResolveReference(r.Storer, name) - } - - return r.Storer.Reference(name) -} - -// References returns an unsorted ReferenceIter for all references. -func (r *Repository) References() (storer.ReferenceIter, error) { - return r.Storer.IterReferences() -} - -// Worktree returns a worktree based on the given fs, if nil the default -// worktree will be used. -func (r *Repository) Worktree() (*Worktree, error) { - if r.wt == nil { - return nil, ErrIsBareRepository - } - - return &Worktree{r: r, Filesystem: r.wt}, nil -} - -// ResolveRevision resolves revision to corresponding hash. It will always -// resolve to a commit hash, not a tree or annotated tag. 
-// -// Implemented resolvers : HEAD, branch, tag, heads/branch, refs/heads/branch, -// refs/tags/tag, refs/remotes/origin/branch, refs/remotes/origin/HEAD, tilde and caret (HEAD~1, master~^, tag~2, ref/heads/master~1, ...), selection by text (HEAD^{/fix nasty bug}), hash (prefix and full) -func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { - p := revision.NewParserFromString(string(rev)) - - items, err := p.Parse() - - if err != nil { - return nil, err - } - - var commit *object.Commit - - for _, item := range items { - switch item := item.(type) { - case revision.Ref: - revisionRef := item - - var tryHashes []plumbing.Hash - - tryHashes = append(tryHashes, r.resolveHashPrefix(string(revisionRef))...) - - for _, rule := range append([]string{"%s"}, plumbing.RefRevParseRules...) { - ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(fmt.Sprintf(rule, revisionRef))) - - if err == nil { - tryHashes = append(tryHashes, ref.Hash()) - break - } - } - - // in ambiguous cases, `git rev-parse` will emit a warning, but - // will always return the oid in preference to a ref; we don't have - // the ability to emit a warning here, so (for speed purposes) - // don't bother to detect the ambiguity either, just return in the - // priority that git would. - gotOne := false - for _, hash := range tryHashes { - commitObj, err := r.CommitObject(hash) - if err == nil { - commit = commitObj - gotOne = true - break - } - - tagObj, err := r.TagObject(hash) - if err == nil { - // If the tag target lookup fails here, this most likely - // represents some sort of repo corruption, so let the - // error bubble up. - tagCommit, err := tagObj.Commit() - if err != nil { - return &plumbing.ZeroHash, err - } - commit = tagCommit - gotOne = true - break - } - } - - if !gotOne { - return &plumbing.ZeroHash, plumbing.ErrReferenceNotFound - } - - case revision.CaretPath: - depth := item.Depth - - if depth == 0 { - break - } - - iter := commit.Parents() - - c, err := iter.Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - if depth == 1 { - commit = c - - break - } - - c, err = iter.Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - commit = c - case revision.TildePath: - for i := 0; i < item.Depth; i++ { - c, err := commit.Parents().Next() - - if err != nil { - return &plumbing.ZeroHash, err - } - - commit = c - } - case revision.CaretReg: - history := object.NewCommitPreorderIter(commit, nil, nil) - - re := item.Regexp - negate := item.Negate - - var c *object.Commit - - err := history.ForEach(func(hc *object.Commit) error { - if !negate && re.MatchString(hc.Message) { - c = hc - return storer.ErrStop - } - - if negate && !re.MatchString(hc.Message) { - c = hc - return storer.ErrStop - } - - return nil - }) - if err != nil { - return &plumbing.ZeroHash, err - } - - if c == nil { - return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) - } - - commit = c - } - } - - return &commit.Hash, nil -} - -// resolveHashPrefix returns a list of potential hashes that the given string -// is a prefix of. It quietly swallows errors, returning nil. -func (r *Repository) resolveHashPrefix(hashStr string) []plumbing.Hash { - // Handle complete and partial hashes. - // plumbing.NewHash forces args into a full 20 byte hash, which isn't suitable - // for partial hashes since they will become zero-filled. 
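[editor's note] A minimal sketch of ResolveRevision across the resolver forms listed above; the tag name and hash prefix are placeholders for whatever exists in the repository (abbreviated hashes are expanded by resolveHashPrefix, whose body follows):

```go
package sketch

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func resolve(r *git.Repository) error {
	for _, rev := range []string{"HEAD", "HEAD~2", "v1.0.0", "35e8510"} {
		h, err := r.ResolveRevision(plumbing.Revision(rev))
		if err != nil {
			return err
		}
		fmt.Println(rev, "->", h)
	}
	return nil
}
```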
- - if hashStr == "" { - return nil - } - if len(hashStr) == len(plumbing.ZeroHash)*2 { - // Only a full hash is possible. - hexb, err := hex.DecodeString(hashStr) - if err != nil { - return nil - } - var h plumbing.Hash - copy(h[:], hexb) - return []plumbing.Hash{h} - } - - // Partial hash. - // hex.DecodeString only decodes to complete bytes, so only works with pairs of hex digits. - evenHex := hashStr[:len(hashStr)&^1] - hexb, err := hex.DecodeString(evenHex) - if err != nil { - return nil - } - candidates := expandPartialHash(r.Storer, hexb) - if len(evenHex) == len(hashStr) { - // The prefix was an exact number of bytes. - return candidates - } - // Do another prefix check to ensure the dangling nybble is correct. - var hashes []plumbing.Hash - for _, h := range candidates { - if strings.HasPrefix(h.String(), hashStr) { - hashes = append(hashes, h) - } - } - return hashes -} - -type RepackConfig struct { - // UseRefDeltas configures whether packfile encoder will use reference deltas. - // By default OFSDeltaObject is used. - UseRefDeltas bool - // OnlyDeletePacksOlderThan if set to non-zero value - // selects only objects older than the time provided. - OnlyDeletePacksOlderThan time.Time -} - -func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { - pos, ok := r.Storer.(storer.PackedObjectStorer) - if !ok { - return ErrPackedObjectsNotSupported - } - - // Get the existing object packs. - hs, err := pos.ObjectPacks() - if err != nil { - return err - } - - // Create a new pack. - nh, err := r.createNewObjectPack(cfg) - if err != nil { - return err - } - - // Delete old packs. - for _, h := range hs { - // Skip if new hash is the same as an old one. - if h == nh { - continue - } - err = pos.DeleteOldObjectPackAndIndex(h, cfg.OnlyDeletePacksOlderThan) - if err != nil { - return err - } - } - - return nil -} - -// createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter -// deferred close has the right scope. -func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { - ow := newObjectWalker(r.Storer) - err = ow.walkAllRefs() - if err != nil { - return h, err - } - objs := make([]plumbing.Hash, 0, len(ow.seen)) - for h := range ow.seen { - objs = append(objs, h) - } - pfw, ok := r.Storer.(storer.PackfileWriter) - if !ok { - return h, fmt.Errorf("Repository storer is not a storer.PackfileWriter") - } - wc, err := pfw.PackfileWriter() - if err != nil { - return h, err - } - defer ioutil.CheckClose(wc, &err) - scfg, err := r.Config() - if err != nil { - return h, err - } - enc := packfile.NewEncoder(wc, r.Storer, cfg.UseRefDeltas) - h, err = enc.Encode(objs, scfg.Pack.Window) - if err != nil { - return h, err - } - - // Delete the packed, loose objects. - if los, ok := r.Storer.(storer.LooseObjectStorer); ok { - err = los.ForEachObjectHash(func(hash plumbing.Hash) error { - if ow.isSeen(hash) { - err = los.DeleteLooseObject(hash) - if err != nil { - return err - } - } - return nil - }) - if err != nil { - return h, err - } - } - - return h, err -} - -func expandPartialHash(st storer.EncodedObjectStorer, prefix []byte) (hashes []plumbing.Hash) { - // The fast version is implemented by storage/filesystem.ObjectStorage. - type fastIter interface { - HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) - } - if fi, ok := st.(fastIter); ok { - h, err := fi.HashesWithPrefix(prefix) - if err != nil { - return nil - } - return h - } - - // Slow path. 
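[editor's note] The RepackObjects/RepackConfig pair deleted above consolidates reachable objects into one new pack and prunes the old ones. A minimal sketch; the 24-hour cutoff is an illustrative choice, not a library default:

```go
package sketch

import (
	"time"

	git "github.com/go-git/go-git/v5"
)

// repack writes a single new packfile, then deletes superseded packs
// that predate the given cutoff, plus any loose objects it packed.
func repack(r *git.Repository) error {
	return r.RepackObjects(&git.RepackConfig{
		UseRefDeltas:             false, // keep the default OFS-delta encoding
		OnlyDeletePacksOlderThan: time.Now().Add(-24 * time.Hour),
	})
}
```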
- iter, err := st.IterEncodedObjects(plumbing.AnyObject) - if err != nil { - return nil - } - iter.ForEach(func(obj plumbing.EncodedObject) error { - h := obj.Hash() - if bytes.HasPrefix(h[:], prefix) { - hashes = append(hashes, h) - } - return nil - }) - return -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/status.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/status.go deleted file mode 100644 index 7f18e02278b..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/status.go +++ /dev/null @@ -1,79 +0,0 @@ -package git - -import ( - "bytes" - "fmt" - "path/filepath" -) - -// Status represents the current status of a Worktree. -// The key of the map is the path of the file. -type Status map[string]*FileStatus - -// File returns the FileStatus for a given path, if the FileStatus doesn't -// exists a new FileStatus is added to the map using the path as key. -func (s Status) File(path string) *FileStatus { - if _, ok := (s)[path]; !ok { - s[path] = &FileStatus{Worktree: Untracked, Staging: Untracked} - } - - return s[path] -} - -// IsUntracked checks if file for given path is 'Untracked' -func (s Status) IsUntracked(path string) bool { - stat, ok := (s)[filepath.ToSlash(path)] - return ok && stat.Worktree == Untracked -} - -// IsClean returns true if all the files are in Unmodified status. -func (s Status) IsClean() bool { - for _, status := range s { - if status.Worktree != Unmodified || status.Staging != Unmodified { - return false - } - } - - return true -} - -func (s Status) String() string { - buf := bytes.NewBuffer(nil) - for path, status := range s { - if status.Staging == Unmodified && status.Worktree == Unmodified { - continue - } - - if status.Staging == Renamed { - path = fmt.Sprintf("%s -> %s", path, status.Extra) - } - - fmt.Fprintf(buf, "%c%c %s\n", status.Staging, status.Worktree, path) - } - - return buf.String() -} - -// FileStatus contains the status of a file in the worktree -type FileStatus struct { - // Staging is the status of a file in the staging area - Staging StatusCode - // Worktree is the status of a file in the worktree - Worktree StatusCode - // Extra contains extra information, such as the previous name in a rename - Extra string -} - -// StatusCode status code of a file in the Worktree -type StatusCode byte - -const ( - Unmodified StatusCode = ' ' - Untracked StatusCode = '?' 
- Modified StatusCode = 'M' - Added StatusCode = 'A' - Deleted StatusCode = 'D' - Renamed StatusCode = 'R' - Copied StatusCode = 'C' - UpdatedButUnmerged StatusCode = 'U' -) diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go deleted file mode 100644 index 78a646465a2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go +++ /dev/null @@ -1,48 +0,0 @@ -package filesystem - -import ( - "os" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type ConfigStorage struct { - dir *dotgit.DotGit -} - -func (c *ConfigStorage) Config() (conf *config.Config, err error) { - f, err := c.dir.Config() - if err != nil { - if os.IsNotExist(err) { - return config.NewConfig(), nil - } - - return nil, err - } - - defer ioutil.CheckClose(f, &err) - return config.ReadConfig(f) -} - -func (c *ConfigStorage) SetConfig(cfg *config.Config) (err error) { - if err = cfg.Validate(); err != nil { - return err - } - - f, err := c.dir.ConfigWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - b, err := cfg.Marshal() - if err != nil { - return err - } - - _, err = f.Write(b) - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go deleted file mode 100644 index 6ab2cdf38aa..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go +++ /dev/null @@ -1,37 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing" -) - -type deltaObject struct { - plumbing.EncodedObject - base plumbing.Hash - hash plumbing.Hash - size int64 -} - -func newDeltaObject( - obj plumbing.EncodedObject, - hash plumbing.Hash, - base plumbing.Hash, - size int64) plumbing.DeltaObject { - return &deltaObject{ - EncodedObject: obj, - hash: hash, - base: base, - size: size, - } -} - -func (o *deltaObject) BaseHash() plumbing.Hash { - return o.base -} - -func (o *deltaObject) ActualSize() int64 { - return o.size -} - -func (o *deltaObject) ActualHash() plumbing.Hash { - return o.hash -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go deleted file mode 100644 index 6c386f79921..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go +++ /dev/null @@ -1,1188 +0,0 @@ -// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt -package dotgit - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "time" - - "github.com/go-git/go-billy/v5/osfs" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/utils/ioutil" - - "github.com/go-git/go-billy/v5" -) - -const ( - suffix = ".git" - packedRefsPath = "packed-refs" - configPath = "config" - indexPath = "index" - shallowPath = "shallow" - modulePath = "modules" - objectsPath = "objects" - packPath = "pack" - refsPath = "refs" - branchesPath = "branches" - hooksPath = "hooks" - infoPath = "info" - remotesPath = "remotes" - logsPath = "logs" - worktreesPath = "worktrees" - - tmpPackedRefsPrefix = "._packed-refs" - - packPrefix = "pack-" - packExt = 
".pack" - idxExt = ".idx" -) - -var ( - // ErrNotFound is returned by New when the path is not found. - ErrNotFound = errors.New("path not found") - // ErrIdxNotFound is returned by Idxfile when the idx file is not found - ErrIdxNotFound = errors.New("idx file not found") - // ErrPackfileNotFound is returned by Packfile when the packfile is not found - ErrPackfileNotFound = errors.New("packfile not found") - // ErrConfigNotFound is returned by Config when the config is not found - ErrConfigNotFound = errors.New("config file not found") - // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is - // found in the packed-ref file. This is usually the case for corrupted git - // repositories. - ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") - // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. - ErrPackedRefsBadFormat = errors.New("malformed packed-ref") - // ErrSymRefTargetNotFound is returned when a symbolic reference is - // targeting a non-existing object. This usually means the repository - // is corrupt. - ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") - // ErrIsDir is returned when a reference file is attempting to be read, - // but the path specified is a directory. - ErrIsDir = errors.New("reference path is a directory") -) - -// Options holds configuration for the storage. -type Options struct { - // ExclusiveAccess means that the filesystem is not modified externally - // while the repo is open. - ExclusiveAccess bool - // KeepDescriptors makes the file descriptors to be reused but they will - // need to be manually closed calling Close(). - KeepDescriptors bool -} - -// The DotGit type represents a local git repository on disk. This -// type is not zero-value-safe, use the New function to initialize it. -type DotGit struct { - options Options - fs billy.Filesystem - - // incoming object directory information - incomingChecked bool - incomingDirName string - - objectList []plumbing.Hash // sorted - objectMap map[plumbing.Hash]struct{} - packList []plumbing.Hash - packMap map[plumbing.Hash]struct{} - - files map[plumbing.Hash]billy.File -} - -// New returns a DotGit value ready to be used. The path argument must -// be the absolute path of a git repository directory (e.g. -// "/foo/bar/.git"). -func New(fs billy.Filesystem) *DotGit { - return NewWithOptions(fs, Options{}) -} - -// NewWithOptions sets non default configuration options. -// See New for complete help. -func NewWithOptions(fs billy.Filesystem, o Options) *DotGit { - return &DotGit{ - options: o, - fs: fs, - } -} - -// Initialize creates all the folder scaffolding. -func (d *DotGit) Initialize() error { - mustExists := []string{ - d.fs.Join("objects", "info"), - d.fs.Join("objects", "pack"), - d.fs.Join("refs", "heads"), - d.fs.Join("refs", "tags"), - } - - for _, path := range mustExists { - _, err := d.fs.Stat(path) - if err == nil { - continue - } - - if !os.IsNotExist(err) { - return err - } - - if err := d.fs.MkdirAll(path, os.ModeDir|os.ModePerm); err != nil { - return err - } - } - - return nil -} - -// Close closes all opened files. 
-func (d *DotGit) Close() error { - var firstError error - if d.files != nil { - for _, f := range d.files { - err := f.Close() - if err != nil && firstError == nil { - firstError = err - continue - } - } - - d.files = nil - } - - if firstError != nil { - return firstError - } - - return nil -} - -// ConfigWriter returns a file pointer for write to the config file -func (d *DotGit) ConfigWriter() (billy.File, error) { - return d.fs.Create(configPath) -} - -// Config returns a file pointer for read to the config file -func (d *DotGit) Config() (billy.File, error) { - return d.fs.Open(configPath) -} - -// IndexWriter returns a file pointer for write to the index file -func (d *DotGit) IndexWriter() (billy.File, error) { - return d.fs.Create(indexPath) -} - -// Index returns a file pointer for read to the index file -func (d *DotGit) Index() (billy.File, error) { - return d.fs.Open(indexPath) -} - -// ShallowWriter returns a file pointer for write to the shallow file -func (d *DotGit) ShallowWriter() (billy.File, error) { - return d.fs.Create(shallowPath) -} - -// Shallow returns a file pointer for read to the shallow file -func (d *DotGit) Shallow() (billy.File, error) { - f, err := d.fs.Open(shallowPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - return f, nil -} - -// NewObjectPack return a writer for a new packfile, it saves the packfile to -// disk and also generates and save the index for the given packfile. -func (d *DotGit) NewObjectPack() (*PackWriter, error) { - d.cleanPackList() - return newPackWrite(d.fs) -} - -// ObjectPacks returns the list of availables packfiles -func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { - if !d.options.ExclusiveAccess { - return d.objectPacks() - } - - err := d.genPackList() - if err != nil { - return nil, err - } - - return d.packList, nil -} - -func (d *DotGit) objectPacks() ([]plumbing.Hash, error) { - packDir := d.fs.Join(objectsPath, packPath) - files, err := d.fs.ReadDir(packDir) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - var packs []plumbing.Hash - for _, f := range files { - n := f.Name() - if !strings.HasSuffix(n, packExt) || !strings.HasPrefix(n, packPrefix) { - continue - } - - h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack - if h.IsZero() { - // Ignore files with badly-formatted names. 
- continue - } - packs = append(packs, h) - } - - return packs, nil -} - -func (d *DotGit) objectPackPath(hash plumbing.Hash, extension string) string { - return d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.%s", hash.String(), extension)) -} - -func (d *DotGit) objectPackOpen(hash plumbing.Hash, extension string) (billy.File, error) { - if d.options.KeepDescriptors && extension == "pack" { - if d.files == nil { - d.files = make(map[plumbing.Hash]billy.File) - } - - f, ok := d.files[hash] - if ok { - return f, nil - } - } - - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - path := d.objectPackPath(hash, extension) - pack, err := d.fs.Open(path) - if err != nil { - if os.IsNotExist(err) { - return nil, ErrPackfileNotFound - } - - return nil, err - } - - if d.options.KeepDescriptors && extension == "pack" { - d.files[hash] = pack - } - - return pack, nil -} - -// ObjectPack returns a fs.File of the given packfile -func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - return d.objectPackOpen(hash, `pack`) -} - -// ObjectPackIdx returns a fs.File of the index file for a given packfile -func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { - err := d.hasPack(hash) - if err != nil { - return nil, err - } - - return d.objectPackOpen(hash, `idx`) -} - -func (d *DotGit) DeleteOldObjectPackAndIndex(hash plumbing.Hash, t time.Time) error { - d.cleanPackList() - - path := d.objectPackPath(hash, `pack`) - if !t.IsZero() { - fi, err := d.fs.Stat(path) - if err != nil { - return err - } - // too new, skip deletion. - if !fi.ModTime().Before(t) { - return nil - } - } - err := d.fs.Remove(path) - if err != nil { - return err - } - return d.fs.Remove(d.objectPackPath(hash, `idx`)) -} - -// NewObject return a writer for a new object file. -func (d *DotGit) NewObject() (*ObjectWriter, error) { - d.cleanObjectList() - - return newObjectWriter(d.fs) -} - -// ObjectsWithPrefix returns the hashes of objects that have the given prefix. -func (d *DotGit) ObjectsWithPrefix(prefix []byte) ([]plumbing.Hash, error) { - // Handle edge cases. - if len(prefix) < 1 { - return d.Objects() - } else if len(prefix) > len(plumbing.ZeroHash) { - return nil, nil - } - - if d.options.ExclusiveAccess { - err := d.genObjectList() - if err != nil { - return nil, err - } - - // Rely on d.objectList being sorted. - // Figure out the half-open interval defined by the prefix. - first := sort.Search(len(d.objectList), func(i int) bool { - // Same as plumbing.HashSlice.Less. - return bytes.Compare(d.objectList[i][:], prefix) >= 0 - }) - lim := len(d.objectList) - if limPrefix, overflow := incBytes(prefix); !overflow { - lim = sort.Search(len(d.objectList), func(i int) bool { - // Same as plumbing.HashSlice.Less. - return bytes.Compare(d.objectList[i][:], limPrefix) >= 0 - }) - } - return d.objectList[first:lim], nil - } - - // This is the slow path. - var objects []plumbing.Hash - var n int - err := d.ForEachObjectHash(func(hash plumbing.Hash) error { - n++ - if bytes.HasPrefix(hash[:], prefix) { - objects = append(objects, hash) - } - return nil - }) - if err != nil { - return nil, err - } - return objects, nil -} - -// Objects returns a slice with the hashes of objects found under the -// .git/objects/ directory. 
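// The sorted-list fast path above can be reduced to sort.Search alone: the
// prefix and its incremented sibling bound a half-open interval in the
// ordered hash list. A sketch over assumed two-byte "hashes":

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func main() {
	// An already-sorted object list, shortened to two bytes for brevity.
	list := [][]byte{{0x3f, 0x10}, {0x3f, 0x7a}, {0x3f, 0x7b}, {0x40, 0x00}}
	prefix := []byte{0x3f}
	limPrefix := []byte{0x40} // what incBytes(prefix) returns when it does not overflow

	first := sort.Search(len(list), func(i int) bool {
		return bytes.Compare(list[i], prefix) >= 0
	})
	lim := sort.Search(len(list), func(i int) bool {
		return bytes.Compare(list[i], limPrefix) >= 0
	})
	fmt.Println(list[first:lim]) // every entry starting with 0x3f
}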
-func (d *DotGit) Objects() ([]plumbing.Hash, error) { - if d.options.ExclusiveAccess { - err := d.genObjectList() - if err != nil { - return nil, err - } - - return d.objectList, nil - } - - var objects []plumbing.Hash - err := d.ForEachObjectHash(func(hash plumbing.Hash) error { - objects = append(objects, hash) - return nil - }) - if err != nil { - return nil, err - } - return objects, nil -} - -// ForEachObjectHash iterates over the hashes of objects found under the -// .git/objects/ directory and executes the provided function. -func (d *DotGit) ForEachObjectHash(fun func(plumbing.Hash) error) error { - if !d.options.ExclusiveAccess { - return d.forEachObjectHash(fun) - } - - err := d.genObjectList() - if err != nil { - return err - } - - for _, h := range d.objectList { - err := fun(h) - if err != nil { - return err - } - } - - return nil -} - -func (d *DotGit) forEachObjectHash(fun func(plumbing.Hash) error) error { - files, err := d.fs.ReadDir(objectsPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { - base := f.Name() - d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) - if err != nil { - return err - } - - for _, o := range d { - h := plumbing.NewHash(base + o.Name()) - if h.IsZero() { - // Ignore files with badly-formatted names. - continue - } - err = fun(h) - if err != nil { - return err - } - } - } - } - - return nil -} - -func (d *DotGit) cleanObjectList() { - d.objectMap = nil - d.objectList = nil -} - -func (d *DotGit) genObjectList() error { - if d.objectMap != nil { - return nil - } - - d.objectMap = make(map[plumbing.Hash]struct{}) - populate := func(h plumbing.Hash) error { - d.objectList = append(d.objectList, h) - d.objectMap[h] = struct{}{} - - return nil - } - if err := d.forEachObjectHash(populate); err != nil { - return err - } - plumbing.HashesSort(d.objectList) - return nil -} - -func (d *DotGit) hasObject(h plumbing.Hash) error { - if !d.options.ExclusiveAccess { - return nil - } - - err := d.genObjectList() - if err != nil { - return err - } - - _, ok := d.objectMap[h] - if !ok { - return plumbing.ErrObjectNotFound - } - - return nil -} - -func (d *DotGit) cleanPackList() { - d.packMap = nil - d.packList = nil -} - -func (d *DotGit) genPackList() error { - if d.packMap != nil { - return nil - } - - op, err := d.objectPacks() - if err != nil { - return err - } - - d.packMap = make(map[plumbing.Hash]struct{}) - d.packList = nil - - for _, h := range op { - d.packList = append(d.packList, h) - d.packMap[h] = struct{}{} - } - - return nil -} - -func (d *DotGit) hasPack(h plumbing.Hash) error { - if !d.options.ExclusiveAccess { - return nil - } - - err := d.genPackList() - if err != nil { - return err - } - - _, ok := d.packMap[h] - if !ok { - return ErrPackfileNotFound - } - - return nil -} - -func (d *DotGit) objectPath(h plumbing.Hash) string { - hash := h.String() - return d.fs.Join(objectsPath, hash[0:2], hash[2:40]) -} - -// incomingObjectPath is intended to add support for a git pre-receive hook -// to be written it adds support for go-git to find objects in an "incoming" -// directory, so that the library can be used to write a pre-receive hook -// that deals with the incoming objects. 
-// -// More on git hooks found here : https://git-scm.com/docs/githooks -// More on 'quarantine'/incoming directory here: -// https://git-scm.com/docs/git-receive-pack -func (d *DotGit) incomingObjectPath(h plumbing.Hash) string { - hString := h.String() - - if d.incomingDirName == "" { - return d.fs.Join(objectsPath, hString[0:2], hString[2:40]) - } - - return d.fs.Join(objectsPath, d.incomingDirName, hString[0:2], hString[2:40]) -} - -// hasIncomingObjects searches for an incoming directory and keeps its name -// so it doesn't have to be found each time an object is accessed. -func (d *DotGit) hasIncomingObjects() bool { - if !d.incomingChecked { - directoryContents, err := d.fs.ReadDir(objectsPath) - if err == nil { - for _, file := range directoryContents { - if strings.HasPrefix(file.Name(), "incoming-") && file.IsDir() { - d.incomingDirName = file.Name() - } - } - } - - d.incomingChecked = true - } - - return d.incomingDirName != "" -} - -// Object returns a fs.File pointing the object file, if exists -func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { - err := d.hasObject(h) - if err != nil { - return nil, err - } - - obj1, err1 := d.fs.Open(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - obj2, err2 := d.fs.Open(d.incomingObjectPath(h)) - if err2 != nil { - return obj1, err1 - } - return obj2, err2 - } - return obj1, err1 -} - -// ObjectStat returns a os.FileInfo pointing the object file, if exists -func (d *DotGit) ObjectStat(h plumbing.Hash) (os.FileInfo, error) { - err := d.hasObject(h) - if err != nil { - return nil, err - } - - obj1, err1 := d.fs.Stat(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - obj2, err2 := d.fs.Stat(d.incomingObjectPath(h)) - if err2 != nil { - return obj1, err1 - } - return obj2, err2 - } - return obj1, err1 -} - -// ObjectDelete removes the object file, if exists -func (d *DotGit) ObjectDelete(h plumbing.Hash) error { - d.cleanObjectList() - - err1 := d.fs.Remove(d.objectPath(h)) - if os.IsNotExist(err1) && d.hasIncomingObjects() { - err2 := d.fs.Remove(d.incomingObjectPath(h)) - if err2 != nil { - return err1 - } - return err2 - } - return err1 -} - -func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Reference, err error) { - b, err := stdioutil.ReadAll(rd) - if err != nil { - return nil, err - } - - line := strings.TrimSpace(string(b)) - return plumbing.NewReferenceFromStrings(name, line), nil -} - -func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error { - if old == nil { - return nil - } - ref, err := d.readReferenceFrom(f, old.Name().String()) - if err != nil { - return err - } - if ref.Hash() != old.Hash() { - return storage.ErrReferenceHasChanged - } - _, err = f.Seek(0, io.SeekStart) - if err != nil { - return err - } - return f.Truncate(0) -} - -func (d *DotGit) SetRef(r, old *plumbing.Reference) error { - var content string - switch r.Type() { - case plumbing.SymbolicReference: - content = fmt.Sprintf("ref: %s\n", r.Target()) - case plumbing.HashReference: - content = fmt.Sprintln(r.Hash().String()) - } - - fileName := r.Name().String() - - return d.setRef(fileName, content, old) -} - -// Refs scans the git directory collecting references, which it returns. -// Symbolic references are resolved and included in the output. 
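// The two on-disk encodings written by SetRef above can be read back the
// same way readReferenceFrom does. The names and hashes below are assumed
// sample data, not content from this repository:

package main

import (
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	samples := map[string]string{
		"HEAD":              "ref: refs/heads/master\n",                   // symbolic reference
		"refs/heads/master": "6ecf0ef2c2dffb796033e5a02219af86ec6584e5\n", // hash reference
	}
	for name, body := range samples {
		ref := plumbing.NewReferenceFromStrings(name, strings.TrimSpace(body))
		fmt.Printf("%s symbolic=%v %s\n", ref.Name(), ref.Type() == plumbing.SymbolicReference, ref)
	}
}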
-func (d *DotGit) Refs() ([]*plumbing.Reference, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefsFromPackedRefs(&refs, seen); err != nil { - return nil, err - } - - if err := d.addRefFromHEAD(&refs); err != nil { - return nil, err - } - - return refs, nil -} - -// Ref returns the reference for a given reference name. -func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { - ref, err := d.readReferenceFile(".", name.String()) - if err == nil { - return ref, nil - } - - return d.packedRef(name) -} - -func (d *DotGit) findPackedRefsInFile(f billy.File) ([]*plumbing.Reference, error) { - s := bufio.NewScanner(f) - var refs []*plumbing.Reference - for s.Scan() { - ref, err := d.processLine(s.Text()) - if err != nil { - return nil, err - } - - if ref != nil { - refs = append(refs, ref) - } - } - - return refs, s.Err() -} - -func (d *DotGit) findPackedRefs() (r []*plumbing.Reference, err error) { - f, err := d.fs.Open(packedRefsPath) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - return nil, err - } - - defer ioutil.CheckClose(f, &err) - return d.findPackedRefsInFile(f) -} - -func (d *DotGit) packedRef(name plumbing.ReferenceName) (*plumbing.Reference, error) { - refs, err := d.findPackedRefs() - if err != nil { - return nil, err - } - - for _, ref := range refs { - if ref.Name() == name { - return ref, nil - } - } - - return nil, plumbing.ErrReferenceNotFound -} - -// RemoveRef removes a reference by name. -func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { - path := d.fs.Join(".", name.String()) - _, err := d.fs.Stat(path) - if err == nil { - err = d.fs.Remove(path) - // Drop down to remove it from the packed refs file, too. - } - - if err != nil && !os.IsNotExist(err) { - return err - } - - return d.rewritePackedRefsWithoutRef(name) -} - -func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefs() - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.File, seen map[plumbing.ReferenceName]bool) (err error) { - packedRefs, err := d.findPackedRefsInFile(f) - if err != nil { - return err - } - - for _, ref := range packedRefs { - if !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - return nil -} - -func (d *DotGit) openAndLockPackedRefs(doCreate bool) ( - pr billy.File, err error) { - var f billy.File - defer func() { - if err != nil && f != nil { - ioutil.CheckClose(f, &err) - } - }() - - // File mode is retrieved from a constant defined in the target specific - // files (dotgit_rewrite_packed_refs_*). Some modes are not available - // in all filesystems. - openFlags := d.openAndLockPackedRefsMode() - if doCreate { - openFlags |= os.O_CREATE - } - - // Keep trying to open and lock the file until we're sure the file - // didn't change between the open and the lock. 
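// A self-contained sketch of the packed-refs format consumed above: comment
// lines start with '#', peeled-tag lines with '^', and every other line is
// "<hash> <refname>". The file content is assumed sample data:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	packedRefs := `# pack-refs with: peeled fully-peeled
6ecf0ef2c2dffb796033e5a02219af86ec6584e5 refs/heads/master
04f4dfe3049053b115e18c4f0b4b7be81ab3720c refs/tags/v1.0.0
^6ecf0ef2c2dffb796033e5a02219af86ec6584e5
`
	s := bufio.NewScanner(strings.NewReader(packedRefs))
	for s.Scan() {
		line := s.Text()
		if line == "" || line[0] == '#' || line[0] == '^' {
			continue // comments and peeled entries introduce no new ref
		}
		ws := strings.Split(line, " ")
		fmt.Printf("%s -> %s\n", ws[1], ws[0])
	}
}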
- for { - f, err = d.fs.OpenFile(packedRefsPath, openFlags, 0600) - if err != nil { - if os.IsNotExist(err) && !doCreate { - return nil, nil - } - - return nil, err - } - fi, err := d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - mtime := fi.ModTime() - - err = f.Lock() - if err != nil { - return nil, err - } - - fi, err = d.fs.Stat(packedRefsPath) - if err != nil { - return nil, err - } - if mtime.Equal(fi.ModTime()) { - break - } - // The file has changed since we opened it. Close and retry. - err = f.Close() - if err != nil { - return nil, err - } - } - return f, nil -} - -func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { - pr, err := d.openAndLockPackedRefs(false) - if err != nil { - return err - } - if pr == nil { - return nil - } - defer ioutil.CheckClose(pr, &err) - - // Creating the temp file in the same directory as the target file - // improves our chances for rename operation to be atomic. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - s := bufio.NewScanner(pr) - found := false - for s.Scan() { - line := s.Text() - ref, err := d.processLine(line) - if err != nil { - return err - } - - if ref != nil && ref.Name() == name { - found = true - continue - } - - if _, err := fmt.Fprintln(tmp, line); err != nil { - return err - } - } - - if err := s.Err(); err != nil { - return err - } - - if !found { - return nil - } - - return d.rewritePackedRefsWhileLocked(tmp, pr) -} - -// process lines from a packed-refs file -func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { - if len(line) == 0 { - return nil, nil - } - - switch line[0] { - case '#': // comment - ignore - return nil, nil - case '^': // annotated tag commit of the previous line - ignore - return nil, nil - default: - ws := strings.Split(line, " ") // hash then ref - if len(ws) != 2 { - return nil, ErrPackedRefsBadFormat - } - - return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil - } -} - -func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference, seen map[plumbing.ReferenceName]bool) error { - return d.walkReferencesTree(refs, []string{refsPath}, seen) -} - -func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath []string, seen map[plumbing.ReferenceName]bool) error { - files, err := d.fs.ReadDir(d.fs.Join(relPath...)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - for _, f := range files { - newRelPath := append(append([]string(nil), relPath...), f.Name()) - if f.IsDir() { - if err = d.walkReferencesTree(refs, newRelPath, seen); err != nil { - return err - } - - continue - } - - ref, err := d.readReferenceFile(".", strings.Join(newRelPath, "/")) - if err != nil { - return err - } - - if ref != nil && !seen[ref.Name()] { - *refs = append(*refs, ref) - seen[ref.Name()] = true - } - } - - return nil -} - -func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { - ref, err := d.readReferenceFile(".", "HEAD") - if err != nil { - if os.IsNotExist(err) { - return nil - } - - return err - } - - *refs = append(*refs, ref) - return nil -} - -func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference, err error) { - path = d.fs.Join(path, d.fs.Join(strings.Split(name, "/")...)) - st, err := d.fs.Stat(path) - if err != nil { - return nil, err - } - if st.IsDir() { - return 
nil, ErrIsDir - } - - f, err := d.fs.Open(path) - if err != nil { - return nil, err - } - defer ioutil.CheckClose(f, &err) - - return d.readReferenceFrom(f, name) -} - -func (d *DotGit) CountLooseRefs() (int, error) { - var refs []*plumbing.Reference - var seen = make(map[plumbing.ReferenceName]bool) - if err := d.addRefsFromRefDir(&refs, seen); err != nil { - return 0, err - } - - return len(refs), nil -} - -// PackRefs packs all loose refs into the packed-refs file. -// -// This implementation only works under the assumption that the view -// of the file system won't be updated during this operation. This -// strategy would not work on a general file system though, without -// locking each loose reference and checking it again before deleting -// the file, because otherwise an updated reference could sneak in and -// then be deleted by the packed-refs process. Alternatively, every -// ref update could also lock packed-refs, so only one lock is -// required during ref-packing. But that would worsen performance in -// the common case. -// -// TODO: add an "all" boolean like the `git pack-refs --all` flag. -// When `all` is false, it would only pack refs that have already been -// packed, plus all tags. -func (d *DotGit) PackRefs() (err error) { - // Lock packed-refs, and create it if it doesn't exist yet. - f, err := d.openAndLockPackedRefs(true) - if err != nil { - return err - } - defer ioutil.CheckClose(f, &err) - - // Gather all refs using addRefsFromRefDir and addRefsFromPackedRefs. - var refs []*plumbing.Reference - seen := make(map[plumbing.ReferenceName]bool) - if err = d.addRefsFromRefDir(&refs, seen); err != nil { - return err - } - if len(refs) == 0 { - // Nothing to do! - return nil - } - numLooseRefs := len(refs) - if err = d.addRefsFromPackedRefsFile(&refs, f, seen); err != nil { - return err - } - - // Write them all to a new temp packed-refs file. - tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) - if err != nil { - return err - } - tmpName := tmp.Name() - defer func() { - ioutil.CheckClose(tmp, &err) - _ = d.fs.Remove(tmpName) // don't check err, we might have renamed it - }() - - w := bufio.NewWriter(tmp) - for _, ref := range refs { - _, err = w.WriteString(ref.String() + "\n") - if err != nil { - return err - } - } - err = w.Flush() - if err != nil { - return err - } - - // Rename the temp packed-refs file. - err = d.rewritePackedRefsWhileLocked(tmp, f) - if err != nil { - return err - } - - // Delete all the loose refs, while still holding the packed-refs - // lock. - for _, ref := range refs[:numLooseRefs] { - path := d.fs.Join(".", ref.Name().String()) - err = d.fs.Remove(path) - if err != nil && !os.IsNotExist(err) { - return err - } - } - - return nil -} - -// Module return a billy.Filesystem pointing to the module folder -func (d *DotGit) Module(name string) (billy.Filesystem, error) { - return d.fs.Chroot(d.fs.Join(modulePath, name)) -} - -// Alternates returns DotGit(s) based off paths in objects/info/alternates if -// available. This can be used to checks if it's a shared repository. -func (d *DotGit) Alternates() ([]*DotGit, error) { - altpath := d.fs.Join("objects", "info", "alternates") - f, err := d.fs.Open(altpath) - if err != nil { - return nil, err - } - defer f.Close() - - var alternates []*DotGit - - // Read alternate paths line-by-line and create DotGit objects. 
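// A sketch of the relative-path normalization performed below, with an
// assumed repository root and alternates entry. Entries are relative to
// .git/objects/info, so the first "../" is dropped before joining with the
// repository root:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	root := "/home/dev/project/.git"          // assumed DotGit root
	entry := "../../../reponame/.git/objects" // assumed alternates line

	slashPath := filepath.ToSlash(entry)
	relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) // drop the first ../
	fmt.Println(filepath.Join(root, filepath.FromSlash(relpath)))
	// Output: /home/dev/reponame/.git/objects
}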
- scanner := bufio.NewScanner(f) - for scanner.Scan() { - path := scanner.Text() - if !filepath.IsAbs(path) { - // For relative paths, we can perform an internal conversion to - // slash so that they work cross-platform. - slashPath := filepath.ToSlash(path) - // If the path is not absolute, it must be relative to object - // database (.git/objects/info). - // https://www.kernel.org/pub/software/scm/git/docs/gitrepository-layout.html - // Hence, derive a path relative to DotGit's root. - // "../../../reponame/.git/" -> "../../reponame/.git" - // Remove the first ../ - relpath := filepath.Join(strings.Split(slashPath, "/")[1:]...) - normalPath := filepath.FromSlash(relpath) - path = filepath.Join(d.fs.Root(), normalPath) - } - fs := osfs.New(filepath.Dir(path)) - alternates = append(alternates, New(fs)) - } - - if err = scanner.Err(); err != nil { - return nil, err - } - - return alternates, nil -} - -// Fs returns the underlying filesystem of the DotGit folder. -func (d *DotGit) Fs() billy.Filesystem { - return d.fs -} - -func isHex(s string) bool { - for _, b := range []byte(s) { - if isNum(b) { - continue - } - if isHexAlpha(b) { - continue - } - - return false - } - - return true -} - -func isNum(b byte) bool { - return b >= '0' && b <= '9' -} - -func isHexAlpha(b byte) bool { - return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' -} - -// incBytes increments a byte slice, which involves incrementing the -// right-most byte, and following carry leftward. -// It makes a copy so that the provided slice's underlying array is not modified. -// If the overall operation overflows (e.g. incBytes(0xff, 0xff)), the second return parameter indicates that. -func incBytes(in []byte) (out []byte, overflow bool) { - out = make([]byte, len(in)) - copy(out, in) - for i := len(out) - 1; i >= 0; i-- { - out[i]++ - if out[i] != 0 { - return // Didn't overflow. - } - } - overflow = true - return -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go deleted file mode 100644 index 43263eadfa0..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go +++ /dev/null @@ -1,81 +0,0 @@ -package dotgit - -import ( - "io" - "os" - "runtime" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -func (d *DotGit) openAndLockPackedRefsMode() int { - if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { - return os.O_RDWR - } - - return os.O_RDONLY -} - -func (d *DotGit) rewritePackedRefsWhileLocked( - tmp billy.File, pr billy.File) error { - // Try plain rename. If we aren't using the bare Windows filesystem as the - // storage layer, we might be able to get away with a rename over a locked - // file. - err := d.fs.Rename(tmp.Name(), pr.Name()) - if err == nil { - return nil - } - - // If we are in a filesystem that does not support rename (e.g. sivafs) - // a full copy is done. - if err == billy.ErrNotSupported { - return d.copyNewFile(tmp, pr) - } - - if runtime.GOOS != "windows" { - return err - } - - // Otherwise, Windows doesn't let us rename over a locked file, so - // we have to do a straight copy. Unfortunately this could result - // in a partially-written file if the process fails before the - // copy completes. 
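// A quick demonstration of the incBytes helper defined earlier in this
// file: the right-most byte is incremented and the carry propagates
// leftward, producing the exclusive upper bound used by the prefix search.
// The helper is copied here only to make the example self-contained:

package main

import "fmt"

func incBytes(in []byte) (out []byte, overflow bool) {
	out = make([]byte, len(in))
	copy(out, in)
	for i := len(out) - 1; i >= 0; i-- {
		out[i]++
		if out[i] != 0 {
			return // no carry left to propagate
		}
	}
	overflow = true
	return
}

func main() {
	out, of := incBytes([]byte{0x3f, 0x7a})
	fmt.Printf("%x %v\n", out, of) // 3f7b false
	out, of = incBytes([]byte{0x3f, 0xff})
	fmt.Printf("%x %v\n", out, of) // 4000 false
	out, of = incBytes([]byte{0xff, 0xff})
	fmt.Printf("%x %v\n", out, of) // 0000 true
}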
- return d.copyToExistingFile(tmp, pr) -} - -func (d *DotGit) copyToExistingFile(tmp, pr billy.File) error { - _, err := pr.Seek(0, io.SeekStart) - if err != nil { - return err - } - err = pr.Truncate(0) - if err != nil { - return err - } - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - _, err = io.Copy(pr, tmp) - - return err -} - -func (d *DotGit) copyNewFile(tmp billy.File, pr billy.File) (err error) { - prWrite, err := d.fs.Create(pr.Name()) - if err != nil { - return err - } - - defer ioutil.CheckClose(prWrite, &err) - - _, err = tmp.Seek(0, io.SeekStart) - if err != nil { - return err - } - - _, err = io.Copy(prWrite, tmp) - - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go deleted file mode 100644 index c057f5c4865..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go +++ /dev/null @@ -1,90 +0,0 @@ -package dotgit - -import ( - "fmt" - "os" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/utils/ioutil" - - "github.com/go-git/go-billy/v5" -) - -func (d *DotGit) setRef(fileName, content string, old *plumbing.Reference) (err error) { - if billy.CapabilityCheck(d.fs, billy.ReadAndWriteCapability) { - return d.setRefRwfs(fileName, content, old) - } - - return d.setRefNorwfs(fileName, content, old) -} - -func (d *DotGit) setRefRwfs(fileName, content string, old *plumbing.Reference) (err error) { - // If we are not checking an old ref, just truncate the file. - mode := os.O_RDWR | os.O_CREATE - if old == nil { - mode |= os.O_TRUNC - } - - f, err := d.fs.OpenFile(fileName, mode, 0666) - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - // Lock is unlocked by the deferred Close above. This is because Unlock - // does not imply a fsync and thus there would be a race between - // Unlock+Close and other concurrent writers. Adding Sync to go-billy - // could work, but this is better (and avoids superfluous syncs). - err = f.Lock() - if err != nil { - return err - } - - // this is a no-op to call even when old is nil. - err = d.checkReferenceAndTruncate(f, old) - if err != nil { - return err - } - - _, err = f.Write([]byte(content)) - return err -} - -// There are some filesystems that don't support opening files in RDWD mode. -// In these filesystems the standard SetRef function can not be used as it -// reads the reference file to check that it's not modified before updating it. -// -// This version of the function writes the reference without extra checks -// making it compatible with these simple filesystems. This is usually not -// a problem as they should be accessed by only one process at a time. 
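// A reduced sketch of the check-then-write update described above for
// filesystems that cannot open files read-write. The function name, error
// text and file path are illustrative, not the vendored API:

package main

import (
	"fmt"
	"os"
	"strings"
)

// casWriteFile rewrites path only if its current trimmed content equals old.
func casWriteFile(path, old, next string) error {
	cur, err := os.ReadFile(path)
	if err == nil && strings.TrimSpace(string(cur)) != old {
		return fmt.Errorf("reference has changed concurrently")
	}
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.WriteFile(path, []byte(next+"\n"), 0666)
}

func main() {
	err := casWriteFile("ref.txt",
		"6ecf0ef2c2dffb796033e5a02219af86ec6584e5", // expected old hash
		"04f4dfe3049053b115e18c4f0b4b7be81ab3720c") // replacement hash
	fmt.Println(err)
}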
-func (d *DotGit) setRefNorwfs(fileName, content string, old *plumbing.Reference) error { - _, err := d.fs.Stat(fileName) - if err == nil && old != nil { - fRead, err := d.fs.Open(fileName) - if err != nil { - return err - } - - ref, err := d.readReferenceFrom(fRead, old.Name().String()) - fRead.Close() - - if err != nil { - return err - } - - if ref.Hash() != old.Hash() { - return fmt.Errorf("reference has changed concurrently") - } - } - - f, err := d.fs.Create(fileName) - if err != nil { - return err - } - - defer f.Close() - - _, err = f.Write([]byte(content)) - return err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go deleted file mode 100644 index 8d243efea1f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go +++ /dev/null @@ -1,111 +0,0 @@ -package dotgit - -import ( - "os" - "path/filepath" - "strings" - - "github.com/go-git/go-billy/v5" -) - -// RepositoryFilesystem is a billy.Filesystem compatible object wrapper -// which handles dot-git filesystem operations and supports commondir according to git scm layout: -// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt -type RepositoryFilesystem struct { - dotGitFs billy.Filesystem - commonDotGitFs billy.Filesystem -} - -func NewRepositoryFilesystem(dotGitFs, commonDotGitFs billy.Filesystem) *RepositoryFilesystem { - return &RepositoryFilesystem{ - dotGitFs: dotGitFs, - commonDotGitFs: commonDotGitFs, - } -} - -func (fs *RepositoryFilesystem) mapToRepositoryFsByPath(path string) billy.Filesystem { - // Nothing to decide if commondir not defined - if fs.commonDotGitFs == nil { - return fs.dotGitFs - } - - cleanPath := filepath.Clean(path) - - // Check exceptions for commondir (https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt) - switch cleanPath { - case fs.dotGitFs.Join(logsPath, "HEAD"): - return fs.dotGitFs - case fs.dotGitFs.Join(refsPath, "bisect"), fs.dotGitFs.Join(refsPath, "rewritten"), fs.dotGitFs.Join(refsPath, "worktree"): - return fs.dotGitFs - } - - // Determine dot-git root by first path element. - // There are some elements which should always use commondir when commondir defined. - // Usual dot-git root will be used for the rest of files. 
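// For illustration, the routing below can be restated as a small predicate;
// the prefix set and exceptions mirror the switch that follows, and the
// sample paths are assumptions:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// sharedFirstElems lists the first path elements routed to the common dir.
var sharedFirstElems = map[string]bool{
	"objects": true, "refs": true, "packed-refs": true, "config": true,
	"branches": true, "hooks": true, "info": true, "remotes": true,
	"logs": true, "shallow": true, "worktrees": true,
}

func usesCommonDir(path string) bool {
	clean := filepath.Clean(path)
	switch clean { // per-worktree exceptions stay in the local dot-git dir
	case filepath.Join("logs", "HEAD"),
		filepath.Join("refs", "bisect"),
		filepath.Join("refs", "rewritten"),
		filepath.Join("refs", "worktree"):
		return false
	}
	return sharedFirstElems[strings.Split(clean, string(filepath.Separator))[0]]
}

func main() {
	for _, p := range []string{"HEAD", "index", "logs/HEAD", "objects/pack", "refs/heads/main"} {
		fmt.Println(p, "-> common dir:", usesCommonDir(p))
	}
}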
- switch strings.Split(cleanPath, string(filepath.Separator))[0] { - case objectsPath, refsPath, packedRefsPath, configPath, branchesPath, hooksPath, infoPath, remotesPath, logsPath, shallowPath, worktreesPath: - return fs.commonDotGitFs - default: - return fs.dotGitFs - } -} - -func (fs *RepositoryFilesystem) Create(filename string) (billy.File, error) { - return fs.mapToRepositoryFsByPath(filename).Create(filename) -} - -func (fs *RepositoryFilesystem) Open(filename string) (billy.File, error) { - return fs.mapToRepositoryFsByPath(filename).Open(filename) -} - -func (fs *RepositoryFilesystem) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { - return fs.mapToRepositoryFsByPath(filename).OpenFile(filename, flag, perm) -} - -func (fs *RepositoryFilesystem) Stat(filename string) (os.FileInfo, error) { - return fs.mapToRepositoryFsByPath(filename).Stat(filename) -} - -func (fs *RepositoryFilesystem) Rename(oldpath, newpath string) error { - return fs.mapToRepositoryFsByPath(oldpath).Rename(oldpath, newpath) -} - -func (fs *RepositoryFilesystem) Remove(filename string) error { - return fs.mapToRepositoryFsByPath(filename).Remove(filename) -} - -func (fs *RepositoryFilesystem) Join(elem ...string) string { - return fs.dotGitFs.Join(elem...) -} - -func (fs *RepositoryFilesystem) TempFile(dir, prefix string) (billy.File, error) { - return fs.mapToRepositoryFsByPath(dir).TempFile(dir, prefix) -} - -func (fs *RepositoryFilesystem) ReadDir(path string) ([]os.FileInfo, error) { - return fs.mapToRepositoryFsByPath(path).ReadDir(path) -} - -func (fs *RepositoryFilesystem) MkdirAll(filename string, perm os.FileMode) error { - return fs.mapToRepositoryFsByPath(filename).MkdirAll(filename, perm) -} - -func (fs *RepositoryFilesystem) Lstat(filename string) (os.FileInfo, error) { - return fs.mapToRepositoryFsByPath(filename).Lstat(filename) -} - -func (fs *RepositoryFilesystem) Symlink(target, link string) error { - return fs.mapToRepositoryFsByPath(target).Symlink(target, link) -} - -func (fs *RepositoryFilesystem) Readlink(link string) (string, error) { - return fs.mapToRepositoryFsByPath(link).Readlink(link) -} - -func (fs *RepositoryFilesystem) Chroot(path string) (billy.Filesystem, error) { - return fs.mapToRepositoryFsByPath(path).Chroot(path) -} - -func (fs *RepositoryFilesystem) Root() string { - return fs.dotGitFs.Root() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go deleted file mode 100644 index e2ede938cce..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go +++ /dev/null @@ -1,284 +0,0 @@ -package dotgit - -import ( - "fmt" - "io" - "sync/atomic" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/format/objfile" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - - "github.com/go-git/go-billy/v5" -) - -// PackWriter is a io.Writer that generates the packfile index simultaneously, -// a packfile.Decoder is used with a file reader to read the file being written -// this operation is synchronized with the write operations. 
-// The packfile is written in a temp file, when Close is called this file -// is renamed/moved (depends on the Filesystem implementation) to the final -// location, if the PackWriter is not used, nothing is written -type PackWriter struct { - Notify func(plumbing.Hash, *idxfile.Writer) - - fs billy.Filesystem - fr, fw billy.File - synced *syncedReader - checksum plumbing.Hash - parser *packfile.Parser - writer *idxfile.Writer - result chan error -} - -func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { - fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") - if err != nil { - return nil, err - } - - fr, err := fs.Open(fw.Name()) - if err != nil { - return nil, err - } - - writer := &PackWriter{ - fs: fs, - fw: fw, - fr: fr, - synced: newSyncedReader(fw, fr), - result: make(chan error), - } - - go writer.buildIndex() - return writer, nil -} - -func (w *PackWriter) buildIndex() { - s := packfile.NewScanner(w.synced) - w.writer = new(idxfile.Writer) - var err error - w.parser, err = packfile.NewParser(s, w.writer) - if err != nil { - w.result <- err - return - } - - checksum, err := w.parser.Parse() - if err != nil { - w.result <- err - return - } - - w.checksum = checksum - w.result <- err -} - -// waitBuildIndex waits until buildIndex function finishes, this can terminate -// with a packfile.ErrEmptyPackfile, this means that nothing was written so we -// ignore the error -func (w *PackWriter) waitBuildIndex() error { - err := <-w.result - if err == packfile.ErrEmptyPackfile { - return nil - } - - return err -} - -func (w *PackWriter) Write(p []byte) (int, error) { - return w.synced.Write(p) -} - -// Close closes all the file descriptors and save the final packfile, if nothing -// was written, the tempfiles are deleted without writing a packfile. 
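// The write-while-indexing arrangement above can be approximated with
// io.Pipe: every byte written to the pack file also streams to an indexing
// goroutine, and closing the pipe signals EOF the way Close does. The
// byte-counting goroutine below merely stands in for the packfile parser:

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.CreateTemp("", "tmp_pack_")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	pr, pw := io.Pipe()
	result := make(chan int64, 1)
	go func() {
		n, _ := io.Copy(io.Discard, pr) // stand-in for the index builder
		result <- n
	}()

	w := io.MultiWriter(f, pw)
	fmt.Fprint(w, "PACK....") // pack bytes would be streamed here
	pw.Close()                // let the "indexer" see EOF and finish

	fmt.Println("indexed bytes:", <-result)
}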
-func (w *PackWriter) Close() error { - defer func() { - if w.Notify != nil && w.writer != nil && w.writer.Finished() { - w.Notify(w.checksum, w.writer) - } - - close(w.result) - }() - - if err := w.synced.Close(); err != nil { - return err - } - - if err := w.waitBuildIndex(); err != nil { - return err - } - - if err := w.fr.Close(); err != nil { - return err - } - - if err := w.fw.Close(); err != nil { - return err - } - - if w.writer == nil || !w.writer.Finished() { - return w.clean() - } - - return w.save() -} - -func (w *PackWriter) clean() error { - return w.fs.Remove(w.fw.Name()) -} - -func (w *PackWriter) save() error { - base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) - idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) - if err != nil { - return err - } - - if err := w.encodeIdx(idx); err != nil { - return err - } - - if err := idx.Close(); err != nil { - return err - } - - return w.fs.Rename(w.fw.Name(), fmt.Sprintf("%s.pack", base)) -} - -func (w *PackWriter) encodeIdx(writer io.Writer) error { - idx, err := w.writer.Index() - if err != nil { - return err - } - - e := idxfile.NewEncoder(writer) - _, err = e.Encode(idx) - return err -} - -type syncedReader struct { - w io.Writer - r io.ReadSeeker - - blocked, done uint32 - written, read uint64 - news chan bool -} - -func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { - return &syncedReader{ - w: w, - r: r, - news: make(chan bool), - } -} - -func (s *syncedReader) Write(p []byte) (n int, err error) { - defer func() { - written := atomic.AddUint64(&s.written, uint64(n)) - read := atomic.LoadUint64(&s.read) - if written > read { - s.wake() - } - }() - - n, err = s.w.Write(p) - return -} - -func (s *syncedReader) Read(p []byte) (n int, err error) { - defer func() { atomic.AddUint64(&s.read, uint64(n)) }() - - for { - s.sleep() - n, err = s.r.Read(p) - if err == io.EOF && !s.isDone() && n == 0 { - continue - } - - break - } - - return -} - -func (s *syncedReader) isDone() bool { - return atomic.LoadUint32(&s.done) == 1 -} - -func (s *syncedReader) isBlocked() bool { - return atomic.LoadUint32(&s.blocked) == 1 -} - -func (s *syncedReader) wake() { - if s.isBlocked() { - atomic.StoreUint32(&s.blocked, 0) - s.news <- true - } -} - -func (s *syncedReader) sleep() { - read := atomic.LoadUint64(&s.read) - written := atomic.LoadUint64(&s.written) - if read >= written { - atomic.StoreUint32(&s.blocked, 1) - <-s.news - } - -} - -func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent { - return s.r.Seek(offset, whence) - } - - p, err := s.r.Seek(offset, whence) - atomic.StoreUint64(&s.read, uint64(p)) - - return p, err -} - -func (s *syncedReader) Close() error { - atomic.StoreUint32(&s.done, 1) - close(s.news) - return nil -} - -type ObjectWriter struct { - objfile.Writer - fs billy.Filesystem - f billy.File -} - -func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { - f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") - if err != nil { - return nil, err - } - - return &ObjectWriter{ - Writer: (*objfile.NewWriter(f)), - fs: fs, - f: f, - }, nil -} - -func (w *ObjectWriter) Close() error { - if err := w.Writer.Close(); err != nil { - return err - } - - if err := w.f.Close(); err != nil { - return err - } - - return w.save() -} - -func (w *ObjectWriter) save() error { - hash := w.Hash().String() - file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) - - return w.fs.Rename(w.f.Name(), file) -} diff --git 
a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go deleted file mode 100644 index a19176f83db..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go +++ /dev/null @@ -1,54 +0,0 @@ -package filesystem - -import ( - "bufio" - "os" - - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -type IndexStorage struct { - dir *dotgit.DotGit -} - -func (s *IndexStorage) SetIndex(idx *index.Index) (err error) { - f, err := s.dir.IndexWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - bw := bufio.NewWriter(f) - defer func() { - if e := bw.Flush(); err == nil && e != nil { - err = e - } - }() - - e := index.NewEncoder(bw) - err = e.Encode(idx) - return err -} - -func (s *IndexStorage) Index() (i *index.Index, err error) { - idx := &index.Index{ - Version: 2, - } - - f, err := s.dir.Index() - if err != nil { - if os.IsNotExist(err) { - return idx, nil - } - - return nil, err - } - - defer ioutil.CheckClose(f, &err) - - d := index.NewDecoder(bufio.NewReader(f)) - err = d.Decode(idx) - return idx, err -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go deleted file mode 100644 index 20336c11846..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go +++ /dev/null @@ -1,20 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/storage" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" -) - -type ModuleStorage struct { - dir *dotgit.DotGit -} - -func (s *ModuleStorage) Module(name string) (storage.Storer, error) { - fs, err := s.dir.Module(name) - if err != nil { - return nil, err - } - - return NewStorage(fs, cache.NewObjectLRUDefault()), nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go deleted file mode 100644 index 0c25dad613d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go +++ /dev/null @@ -1,848 +0,0 @@ -package filesystem - -import ( - "bytes" - "io" - "os" - "time" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/plumbing/format/idxfile" - "github.com/go-git/go-git/v5/plumbing/format/objfile" - "github.com/go-git/go-git/v5/plumbing/format/packfile" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" - - "github.com/go-git/go-billy/v5" -) - -type ObjectStorage struct { - options Options - - // objectCache is an object cache uses to cache delta's bases and also recently - // loaded loose objects - objectCache cache.Object - - dir *dotgit.DotGit - index map[plumbing.Hash]idxfile.Index - - packList []plumbing.Hash - packListIdx int - packfiles map[plumbing.Hash]*packfile.Packfile -} - -// NewObjectStorage creates a new ObjectStorage with the given .git directory and cache. 
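// A construction sketch for this storage, assuming an on-disk repository
// path; NewStorage and NewObjectLRUDefault are the same helpers used by
// ModuleStorage above:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	fs := osfs.New("/home/dev/project/.git") // assumed repository path
	st := filesystem.NewStorage(fs, cache.NewObjectLRUDefault())
	fmt.Printf("%T\n", st)
}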
-func NewObjectStorage(dir *dotgit.DotGit, objectCache cache.Object) *ObjectStorage { - return NewObjectStorageWithOptions(dir, objectCache, Options{}) -} - -// NewObjectStorageWithOptions creates a new ObjectStorage with the given .git directory, cache and extra options -func NewObjectStorageWithOptions(dir *dotgit.DotGit, objectCache cache.Object, ops Options) *ObjectStorage { - return &ObjectStorage{ - options: ops, - objectCache: objectCache, - dir: dir, - } -} - -func (s *ObjectStorage) requireIndex() error { - if s.index != nil { - return nil - } - - s.index = make(map[plumbing.Hash]idxfile.Index) - packs, err := s.dir.ObjectPacks() - if err != nil { - return err - } - - for _, h := range packs { - if err := s.loadIdxFile(h); err != nil { - return err - } - } - - return nil -} - -// Reindex indexes again all packfiles. Useful if git changed packfiles externally -func (s *ObjectStorage) Reindex() { - s.index = nil -} - -func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) (err error) { - f, err := s.dir.ObjectPackIdx(h) - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - - idxf := idxfile.NewMemoryIndex() - d := idxfile.NewDecoder(f) - if err = d.Decode(idxf); err != nil { - return err - } - - s.index[h] = idxf - return err -} - -func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { - return &plumbing.MemoryObject{} -} - -func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { - if err := s.requireIndex(); err != nil { - return nil, err - } - - w, err := s.dir.NewObjectPack() - if err != nil { - return nil, err - } - - w.Notify = func(h plumbing.Hash, writer *idxfile.Writer) { - index, err := writer.Index() - if err == nil { - s.index[h] = index - } - } - - return w, nil -} - -// SetEncodedObject adds a new object to the storage. -func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (h plumbing.Hash, err error) { - if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject { - return plumbing.ZeroHash, plumbing.ErrInvalidType - } - - ow, err := s.dir.NewObject() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(ow, &err) - - or, err := o.Reader() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(or, &err) - - if err = ow.WriteHeader(o.Type(), o.Size()); err != nil { - return plumbing.ZeroHash, err - } - - if _, err = io.Copy(ow, or); err != nil { - return plumbing.ZeroHash, err - } - - return o.Hash(), err -} - -// HasEncodedObject returns nil if the object exists, without actually -// reading the object data from storage. -func (s *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { - // Check unpacked objects - f, err := s.dir.Object(h) - if err != nil { - if !os.IsNotExist(err) { - return err - } - // Fall through to check packed objects. - } else { - defer ioutil.CheckClose(f, &err) - return nil - } - - // Check packed objects. 
- if err := s.requireIndex(); err != nil { - return err - } - _, _, offset := s.findObjectInPackfile(h) - if offset == -1 { - return plumbing.ErrObjectNotFound - } - return nil -} - -func (s *ObjectStorage) encodedObjectSizeFromUnpacked(h plumbing.Hash) ( - size int64, err error) { - f, err := s.dir.Object(h) - if err != nil { - if os.IsNotExist(err) { - return 0, plumbing.ErrObjectNotFound - } - - return 0, err - } - - r, err := objfile.NewReader(f) - if err != nil { - return 0, err - } - defer ioutil.CheckClose(r, &err) - - _, size, err = r.Header() - return size, err -} - -func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfile.Packfile, error) { - if p := s.packfileFromCache(pack); p != nil { - return p, nil - } - - f, err := s.dir.ObjectPack(pack) - if err != nil { - return nil, err - } - - var p *packfile.Packfile - if s.objectCache != nil { - p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache) - } else { - p = packfile.NewPackfile(idx, s.dir.Fs(), f) - } - - return p, s.storePackfileInCache(pack, p) -} - -func (s *ObjectStorage) packfileFromCache(hash plumbing.Hash) *packfile.Packfile { - if s.packfiles == nil { - if s.options.KeepDescriptors { - s.packfiles = make(map[plumbing.Hash]*packfile.Packfile) - } else if s.options.MaxOpenDescriptors > 0 { - s.packList = make([]plumbing.Hash, s.options.MaxOpenDescriptors) - s.packfiles = make(map[plumbing.Hash]*packfile.Packfile, s.options.MaxOpenDescriptors) - } - } - - return s.packfiles[hash] -} - -func (s *ObjectStorage) storePackfileInCache(hash plumbing.Hash, p *packfile.Packfile) error { - if s.options.KeepDescriptors { - s.packfiles[hash] = p - return nil - } - - if s.options.MaxOpenDescriptors <= 0 { - return nil - } - - // start over as the limit of packList is hit - if s.packListIdx >= len(s.packList) { - s.packListIdx = 0 - } - - // close the existing packfile if open - if next := s.packList[s.packListIdx]; !next.IsZero() { - open := s.packfiles[next] - delete(s.packfiles, next) - if open != nil { - if err := open.Close(); err != nil { - return err - } - } - } - - // cache newly open packfile - s.packList[s.packListIdx] = hash - s.packfiles[hash] = p - s.packListIdx++ - - return nil -} - -func (s *ObjectStorage) encodedObjectSizeFromPackfile(h plumbing.Hash) ( - size int64, err error) { - if err := s.requireIndex(); err != nil { - return 0, err - } - - pack, _, offset := s.findObjectInPackfile(h) - if offset == -1 { - return 0, plumbing.ErrObjectNotFound - } - - idx := s.index[pack] - hash, err := idx.FindHash(offset) - if err == nil { - obj, ok := s.objectCache.Get(hash) - if ok { - return obj.Size(), nil - } - } else if err != nil && err != plumbing.ErrObjectNotFound { - return 0, err - } - - p, err := s.packfile(idx, pack) - if err != nil { - return 0, err - } - - if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { - defer ioutil.CheckClose(p, &err) - } - - return p.GetSizeByOffset(offset) -} - -// EncodedObjectSize returns the plaintext size of the given object, -// without actually reading the full object data from storage. -func (s *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( - size int64, err error) { - size, err = s.encodedObjectSizeFromUnpacked(h) - if err != nil && err != plumbing.ErrObjectNotFound { - return 0, err - } else if err == nil { - return size, nil - } - - return s.encodedObjectSizeFromPackfile(h) -} - -// EncodedObject returns the object with the given hash, by searching for it in -// the packfile and the git object directories. 
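// The bounded descriptor cache above cycles through a fixed-size ring: when
// the slot index wraps, whatever occupies that slot is closed and evicted.
// A minimal generic sketch; the type and names are assumptions:

package main

import "fmt"

type ringCache struct {
	slots []string       // keys in insertion order, fixed capacity
	idx   int            // next slot to overwrite, like packListIdx
	items map[string]int // cached values, stand-ins for open packfiles
}

func newRingCache(n int) *ringCache {
	return &ringCache{slots: make([]string, n), items: make(map[string]int, n)}
}

func (c *ringCache) put(key string, v int) {
	if c.idx >= len(c.slots) {
		c.idx = 0 // wrap around
	}
	if old := c.slots[c.idx]; old != "" {
		delete(c.items, old) // "close" the descriptor being evicted
	}
	c.slots[c.idx] = key
	c.items[key] = v
	c.idx++
}

func main() {
	c := newRingCache(2)
	c.put("pack-a", 1)
	c.put("pack-b", 2)
	c.put("pack-c", 3) // evicts pack-a
	fmt.Println(c.items)
}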
-func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - var obj plumbing.EncodedObject - var err error - - if s.index != nil { - obj, err = s.getFromPackfile(h, false) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromUnpacked(h) - } - } else { - obj, err = s.getFromUnpacked(h) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromPackfile(h, false) - } - } - - // If the error is still object not found, check if it's a shared object - // repository. - if err == plumbing.ErrObjectNotFound { - dotgits, e := s.dir.Alternates() - if e == nil { - // Create a new object storage with the DotGit(s) and check for the - // required hash object. Skip when not found. - for _, dg := range dotgits { - o := NewObjectStorage(dg, s.objectCache) - enobj, enerr := o.EncodedObject(t, h) - if enerr != nil { - continue - } - return enobj, nil - } - } - } - - if err != nil { - return nil, err - } - - if plumbing.AnyObject != t && obj.Type() != t { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -// DeltaObject returns the object with the given hash, by searching for -// it in the packfile and the git object directories. -func (s *ObjectStorage) DeltaObject(t plumbing.ObjectType, - h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, err := s.getFromUnpacked(h) - if err == plumbing.ErrObjectNotFound { - obj, err = s.getFromPackfile(h, true) - } - - if err != nil { - return nil, err - } - - if plumbing.AnyObject != t && obj.Type() != t { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { - f, err := s.dir.Object(h) - if err != nil { - if os.IsNotExist(err) { - return nil, plumbing.ErrObjectNotFound - } - - return nil, err - } - defer ioutil.CheckClose(f, &err) - - if cacheObj, found := s.objectCache.Get(h); found { - return cacheObj, nil - } - - obj = s.NewEncodedObject() - r, err := objfile.NewReader(f) - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(r, &err) - - t, size, err := r.Header() - if err != nil { - return nil, err - } - - obj.SetType(t) - obj.SetSize(size) - w, err := obj.Writer() - if err != nil { - return nil, err - } - - defer ioutil.CheckClose(w, &err) - - s.objectCache.Put(obj) - - _, err = io.Copy(w, r) - return obj, err -} - -// Get returns the object with the given hash, by searching for it in -// the packfile. 
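The alternates fallback in EncodedObject above mirrors plain git's shared object stores: `.git/objects/info/alternates` lists extra object directories to search, one per line. A minimal sketch of that file-level mechanism (the path is a placeholder; go-git wraps this in dotgit.Alternates):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// alternates returns the extra object directories configured for a .git dir.
func alternates(gitDir string) ([]string, error) {
	data, err := os.ReadFile(filepath.Join(gitDir, "objects", "info", "alternates"))
	if os.IsNotExist(err) {
		return nil, nil // no alternates configured
	}
	if err != nil {
		return nil, err
	}
	var dirs []string
	for _, line := range strings.Split(strings.TrimSpace(string(data)), "\n") {
		if line != "" {
			dirs = append(dirs, line)
		}
	}
	return dirs, nil
}

func main() {
	dirs, err := alternates("/path/to/repo/.git") // placeholder path
	if err != nil {
		panic(err)
	}
	fmt.Println("alternate object dirs:", dirs)
}
```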
-func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) ( - plumbing.EncodedObject, error) { - - if err := s.requireIndex(); err != nil { - return nil, err - } - - pack, hash, offset := s.findObjectInPackfile(h) - if offset == -1 { - return nil, plumbing.ErrObjectNotFound - } - - idx := s.index[pack] - p, err := s.packfile(idx, pack) - if err != nil { - return nil, err - } - - if !s.options.KeepDescriptors && s.options.MaxOpenDescriptors == 0 { - defer ioutil.CheckClose(p, &err) - } - - if canBeDelta { - return s.decodeDeltaObjectAt(p, offset, hash) - } - - return s.decodeObjectAt(p, offset) -} - -func (s *ObjectStorage) decodeObjectAt( - p *packfile.Packfile, - offset int64, -) (plumbing.EncodedObject, error) { - hash, err := p.FindHash(offset) - if err == nil { - obj, ok := s.objectCache.Get(hash) - if ok { - return obj, nil - } - } - - if err != nil && err != plumbing.ErrObjectNotFound { - return nil, err - } - - return p.GetByOffset(offset) -} - -func (s *ObjectStorage) decodeDeltaObjectAt( - p *packfile.Packfile, - offset int64, - hash plumbing.Hash, -) (plumbing.EncodedObject, error) { - scan := p.Scanner() - header, err := scan.SeekObjectHeader(offset) - if err != nil { - return nil, err - } - - var ( - base plumbing.Hash - ) - - switch header.Type { - case plumbing.REFDeltaObject: - base = header.Reference - case plumbing.OFSDeltaObject: - base, err = p.FindHash(header.OffsetReference) - if err != nil { - return nil, err - } - default: - return s.decodeObjectAt(p, offset) - } - - obj := &plumbing.MemoryObject{} - obj.SetType(header.Type) - w, err := obj.Writer() - if err != nil { - return nil, err - } - - if _, _, err := scan.NextObject(w); err != nil { - return nil, err - } - - return newDeltaObject(obj, hash, base, header.Length), nil -} - -func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, plumbing.Hash, int64) { - for packfile, index := range s.index { - offset, err := index.FindOffset(h) - if err == nil { - return packfile, h, offset - } - } - - return plumbing.ZeroHash, plumbing.ZeroHash, -1 -} - -func (s *ObjectStorage) HashesWithPrefix(prefix []byte) ([]plumbing.Hash, error) { - hashes, err := s.dir.ObjectsWithPrefix(prefix) - if err != nil { - return nil, err - } - - // TODO: This could be faster with some idxfile changes, - // or diving into the packfile. - for _, index := range s.index { - ei, err := index.Entries() - if err != nil { - return nil, err - } - for { - e, err := ei.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - if bytes.HasPrefix(e.Hash[:], prefix) { - hashes = append(hashes, e.Hash) - } - } - ei.Close() - } - - return hashes, nil -} - -// IterEncodedObjects returns an iterator for all the objects in the packfile -// with the given type. 
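decodeDeltaObjectAt above distinguishes the two delta encodings a packfile can hold: a REF delta names its base object by hash, while an OFS delta points back at a base earlier in the same packfile by byte offset. Either way, the delta body is a recipe of copy-from-base and insert-literal instructions; a toy applier showing just that idea, deliberately not Git's binary delta format:

```go
package main

import "fmt"

// deltaOp is either a copy from the base object or a literal insertion.
type deltaOp struct {
	copyOff, copyLen int    // used when insert is nil
	insert           []byte // literal bytes, if any
}

// applyDelta rebuilds the target object from the base and the op list.
func applyDelta(base []byte, ops []deltaOp) []byte {
	var out []byte
	for _, op := range ops {
		if op.insert != nil {
			out = append(out, op.insert...)
			continue
		}
		out = append(out, base[op.copyOff:op.copyOff+op.copyLen]...)
	}
	return out
}

func main() {
	base := []byte("hello, packfile")
	ops := []deltaOp{
		{copyOff: 0, copyLen: 7},  // reuse "hello, " from the base
		{insert: []byte("delta")}, // splice in new content
	}
	fmt.Println(string(applyDelta(base, ops))) // hello, delta
}
```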
-func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { - objects, err := s.dir.Objects() - if err != nil { - return nil, err - } - - seen := make(map[plumbing.Hash]struct{}) - var iters []storer.EncodedObjectIter - if len(objects) != 0 { - iters = append(iters, &objectsIter{s: s, t: t, h: objects}) - seen = hashListAsMap(objects) - } - - packi, err := s.buildPackfileIters(t, seen) - if err != nil { - return nil, err - } - - iters = append(iters, packi) - return storer.NewMultiEncodedObjectIter(iters), nil -} - -func (s *ObjectStorage) buildPackfileIters( - t plumbing.ObjectType, - seen map[plumbing.Hash]struct{}, -) (storer.EncodedObjectIter, error) { - if err := s.requireIndex(); err != nil { - return nil, err - } - - packs, err := s.dir.ObjectPacks() - if err != nil { - return nil, err - } - return &lazyPackfilesIter{ - hashes: packs, - open: func(h plumbing.Hash) (storer.EncodedObjectIter, error) { - pack, err := s.dir.ObjectPack(h) - if err != nil { - return nil, err - } - return newPackfileIter( - s.dir.Fs(), pack, t, seen, s.index[h], - s.objectCache, s.options.KeepDescriptors, - ) - }, - }, nil -} - -// Close closes all opened files. -func (s *ObjectStorage) Close() error { - var firstError error - if s.options.KeepDescriptors || s.options.MaxOpenDescriptors > 0 { - for _, packfile := range s.packfiles { - err := packfile.Close() - if firstError == nil && err != nil { - firstError = err - } - } - } - - s.packfiles = nil - s.dir.Close() - - return firstError -} - -type lazyPackfilesIter struct { - hashes []plumbing.Hash - open func(h plumbing.Hash) (storer.EncodedObjectIter, error) - cur storer.EncodedObjectIter -} - -func (it *lazyPackfilesIter) Next() (plumbing.EncodedObject, error) { - for { - if it.cur == nil { - if len(it.hashes) == 0 { - return nil, io.EOF - } - h := it.hashes[0] - it.hashes = it.hashes[1:] - - sub, err := it.open(h) - if err == io.EOF { - continue - } else if err != nil { - return nil, err - } - it.cur = sub - } - ob, err := it.cur.Next() - if err == io.EOF { - it.cur.Close() - it.cur = nil - continue - } else if err != nil { - return nil, err - } - return ob, nil - } -} - -func (it *lazyPackfilesIter) ForEach(cb func(plumbing.EncodedObject) error) error { - return storer.ForEachIterator(it, cb) -} - -func (it *lazyPackfilesIter) Close() { - if it.cur != nil { - it.cur.Close() - it.cur = nil - } - it.hashes = nil -} - -type packfileIter struct { - pack billy.File - iter storer.EncodedObjectIter - seen map[plumbing.Hash]struct{} - - // tells whether the pack file should be left open after iteration or not - keepPack bool -} - -// NewPackfileIter returns a new EncodedObjectIter for the provided packfile -// and object type. Packfile and index file will be closed after they're -// used. If keepPack is true the packfile won't be closed after the iteration -// finished. 
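Seen from the public API, all of this machinery (objectsIter for loose objects plus the lazy packfile iterators, merged and deduplicated) sits behind one call. A usage sketch with a placeholder repository path:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	r, err := git.PlainOpen("/path/to/repo") // placeholder path
	if err != nil {
		panic(err)
	}

	// Iterate every commit object, loose or packed; the storage layer
	// merges its iterators behind one storer.EncodedObjectIter.
	iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject)
	if err != nil {
		panic(err)
	}
	defer iter.Close()

	err = iter.ForEach(func(o plumbing.EncodedObject) error {
		fmt.Println(o.Hash(), o.Type(), o.Size())
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```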
-func NewPackfileIter( - fs billy.Filesystem, - f billy.File, - idxFile billy.File, - t plumbing.ObjectType, - keepPack bool, -) (storer.EncodedObjectIter, error) { - idx := idxfile.NewMemoryIndex() - if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil { - return nil, err - } - - if err := idxFile.Close(); err != nil { - return nil, err - } - - seen := make(map[plumbing.Hash]struct{}) - return newPackfileIter(fs, f, t, seen, idx, nil, keepPack) -} - -func newPackfileIter( - fs billy.Filesystem, - f billy.File, - t plumbing.ObjectType, - seen map[plumbing.Hash]struct{}, - index idxfile.Index, - cache cache.Object, - keepPack bool, -) (storer.EncodedObjectIter, error) { - var p *packfile.Packfile - if cache != nil { - p = packfile.NewPackfileWithCache(index, fs, f, cache) - } else { - p = packfile.NewPackfile(index, fs, f) - } - - iter, err := p.GetByType(t) - if err != nil { - return nil, err - } - - return &packfileIter{ - pack: f, - iter: iter, - seen: seen, - keepPack: keepPack, - }, nil -} - -func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { - for { - obj, err := iter.iter.Next() - if err != nil { - return nil, err - } - - if _, ok := iter.seen[obj.Hash()]; ok { - continue - } - - return obj, nil - } -} - -func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { - for { - o, err := iter.Next() - if err != nil { - if err == io.EOF { - iter.Close() - return nil - } - return err - } - - if err := cb(o); err != nil { - return err - } - } -} - -func (iter *packfileIter) Close() { - iter.iter.Close() - if !iter.keepPack { - _ = iter.pack.Close() - } -} - -type objectsIter struct { - s *ObjectStorage - t plumbing.ObjectType - h []plumbing.Hash -} - -func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { - if len(iter.h) == 0 { - return nil, io.EOF - } - - obj, err := iter.s.getFromUnpacked(iter.h[0]) - iter.h = iter.h[1:] - - if err != nil { - return nil, err - } - - if iter.t != plumbing.AnyObject && iter.t != obj.Type() { - return iter.Next() - } - - return obj, err -} - -func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { - for { - o, err := iter.Next() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - - if err := cb(o); err != nil { - return err - } - } -} - -func (iter *objectsIter) Close() { - iter.h = []plumbing.Hash{} -} - -func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]struct{} { - m := make(map[plumbing.Hash]struct{}, len(l)) - for _, h := range l { - m[h] = struct{}{} - } - return m -} - -func (s *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { - err := s.dir.ForEachObjectHash(fun) - if err == storer.ErrStop { - return nil - } - return err -} - -func (s *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { - fi, err := s.dir.ObjectStat(hash) - if err != nil { - return time.Time{}, err - } - return fi.ModTime(), nil -} - -func (s *ObjectStorage) DeleteLooseObject(hash plumbing.Hash) error { - return s.dir.ObjectDelete(hash) -} - -func (s *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { - return s.dir.ObjectPacks() -} - -func (s *ObjectStorage) DeleteOldObjectPackAndIndex(h plumbing.Hash, t time.Time) error { - return s.dir.DeleteOldObjectPackAndIndex(h, t) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go deleted file mode 100644 index aabcd7308d6..00000000000 --- 
a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go +++ /dev/null @@ -1,44 +0,0 @@ -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" -) - -type ReferenceStorage struct { - dir *dotgit.DotGit -} - -func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { - return r.dir.SetRef(ref, nil) -} - -func (r *ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { - return r.dir.SetRef(ref, old) -} - -func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { - return r.dir.Ref(n) -} - -func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { - refs, err := r.dir.Refs() - if err != nil { - return nil, err - } - - return storer.NewReferenceSliceIter(refs), nil -} - -func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { - return r.dir.RemoveRef(n) -} - -func (r *ReferenceStorage) CountLooseRefs() (int, error) { - return r.dir.CountLooseRefs() -} - -func (r *ReferenceStorage) PackRefs() error { - return r.dir.PackRefs() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go deleted file mode 100644 index afb600cf2da..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go +++ /dev/null @@ -1,54 +0,0 @@ -package filesystem - -import ( - "bufio" - "fmt" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - "github.com/go-git/go-git/v5/utils/ioutil" -) - -// ShallowStorage where the shallow commits are stored, an internal to -// manipulate the shallow file -type ShallowStorage struct { - dir *dotgit.DotGit -} - -// SetShallow save the shallows in the shallow file in the .git folder as one -// commit per line represented by 40-byte hexadecimal object terminated by a -// newline. -func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { - f, err := s.dir.ShallowWriter() - if err != nil { - return err - } - - defer ioutil.CheckClose(f, &err) - for _, h := range commits { - if _, err := fmt.Fprintf(f, "%s\n", h); err != nil { - return err - } - } - - return err -} - -// Shallow return the shallow commits reading from shallo file from .git -func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { - f, err := s.dir.Shallow() - if f == nil || err != nil { - return nil, err - } - - defer ioutil.CheckClose(f, &err) - - var hash []plumbing.Hash - - scn := bufio.NewScanner(f) - for scn.Scan() { - hash = append(hash, plumbing.NewHash(scn.Text())) - } - - return hash, scn.Err() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go deleted file mode 100644 index 8b69b27b00a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go +++ /dev/null @@ -1,73 +0,0 @@ -// Package filesystem is a storage backend base on filesystems -package filesystem - -import ( - "github.com/go-git/go-git/v5/plumbing/cache" - "github.com/go-git/go-git/v5/storage/filesystem/dotgit" - - "github.com/go-git/go-billy/v5" -) - -// Storage is an implementation of git.Storer that stores data on disk in the -// standard git format (this is, the .git directory). 
Zero values of this type -// are not safe to use, see the NewStorage function below. -type Storage struct { - fs billy.Filesystem - dir *dotgit.DotGit - - ObjectStorage - ReferenceStorage - IndexStorage - ShallowStorage - ConfigStorage - ModuleStorage -} - -// Options holds configuration for the storage. -type Options struct { - // ExclusiveAccess means that the filesystem is not modified externally - // while the repo is open. - ExclusiveAccess bool - // KeepDescriptors makes the file descriptors to be reused but they will - // need to be manually closed calling Close(). - KeepDescriptors bool - // MaxOpenDescriptors is the max number of file descriptors to keep - // open. If KeepDescriptors is true, all file descriptors will remain open. - MaxOpenDescriptors int -} - -// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache. -func NewStorage(fs billy.Filesystem, cache cache.Object) *Storage { - return NewStorageWithOptions(fs, cache, Options{}) -} - -// NewStorageWithOptions returns a new Storage with extra options, -// backed by a given `fs.Filesystem` and cache. -func NewStorageWithOptions(fs billy.Filesystem, cache cache.Object, ops Options) *Storage { - dirOps := dotgit.Options{ - ExclusiveAccess: ops.ExclusiveAccess, - } - dir := dotgit.NewWithOptions(fs, dirOps) - - return &Storage{ - fs: fs, - dir: dir, - - ObjectStorage: *NewObjectStorageWithOptions(dir, cache, ops), - ReferenceStorage: ReferenceStorage{dir: dir}, - IndexStorage: IndexStorage{dir: dir}, - ShallowStorage: ShallowStorage{dir: dir}, - ConfigStorage: ConfigStorage{dir: dir}, - ModuleStorage: ModuleStorage{dir: dir}, - } -} - -// Filesystem returns the underlying filesystem -func (s *Storage) Filesystem() billy.Filesystem { - return s.fs -} - -// Init initializes .git directory -func (s *Storage) Init() error { - return s.dir.Initialize() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go deleted file mode 100644 index a8e56697b9d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/memory/storage.go +++ /dev/null @@ -1,320 +0,0 @@ -// Package memory is a storage backend base on memory -package memory - -import ( - "fmt" - "time" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/storage" -) - -var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") - -// Storage is an implementation of git.Storer that stores data on memory, being -// ephemeral. The use of this storage should be done in controlled environments, -// since the representation in memory of some repository can fill the machine -// memory. in the other hand this storage has the best performance. 
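Tying the filesystem Storage and its Options together: a sketch that opens an existing repository as bare while capping the number of packfile descriptors kept open (the path and the limit are illustrative):

```go
package main

import (
	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"

	"github.com/go-git/go-billy/v5/osfs"
)

func main() {
	// Point a billy filesystem at a .git directory (placeholder path) and
	// bound how many packfiles stay open at once.
	fs := osfs.New("/path/to/repo/.git")
	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		MaxOpenDescriptors: 8,
	})

	// A nil worktree filesystem opens the repository as bare.
	if _, err := git.Open(st, nil); err != nil {
		panic(err)
	}
}
```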
-type Storage struct { - ConfigStorage - ObjectStorage - ShallowStorage - IndexStorage - ReferenceStorage - ModuleStorage -} - -// NewStorage returns a new Storage base on memory -func NewStorage() *Storage { - return &Storage{ - ReferenceStorage: make(ReferenceStorage), - ConfigStorage: ConfigStorage{}, - ShallowStorage: ShallowStorage{}, - ObjectStorage: ObjectStorage{ - Objects: make(map[plumbing.Hash]plumbing.EncodedObject), - Commits: make(map[plumbing.Hash]plumbing.EncodedObject), - Trees: make(map[plumbing.Hash]plumbing.EncodedObject), - Blobs: make(map[plumbing.Hash]plumbing.EncodedObject), - Tags: make(map[plumbing.Hash]plumbing.EncodedObject), - }, - ModuleStorage: make(ModuleStorage), - } -} - -type ConfigStorage struct { - config *config.Config -} - -func (c *ConfigStorage) SetConfig(cfg *config.Config) error { - if err := cfg.Validate(); err != nil { - return err - } - - c.config = cfg - return nil -} - -func (c *ConfigStorage) Config() (*config.Config, error) { - if c.config == nil { - c.config = config.NewConfig() - } - - return c.config, nil -} - -type IndexStorage struct { - index *index.Index -} - -func (c *IndexStorage) SetIndex(idx *index.Index) error { - c.index = idx - return nil -} - -func (c *IndexStorage) Index() (*index.Index, error) { - if c.index == nil { - c.index = &index.Index{Version: 2} - } - - return c.index, nil -} - -type ObjectStorage struct { - Objects map[plumbing.Hash]plumbing.EncodedObject - Commits map[plumbing.Hash]plumbing.EncodedObject - Trees map[plumbing.Hash]plumbing.EncodedObject - Blobs map[plumbing.Hash]plumbing.EncodedObject - Tags map[plumbing.Hash]plumbing.EncodedObject -} - -func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { - return &plumbing.MemoryObject{} -} - -func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { - h := obj.Hash() - o.Objects[h] = obj - - switch obj.Type() { - case plumbing.CommitObject: - o.Commits[h] = o.Objects[h] - case plumbing.TreeObject: - o.Trees[h] = o.Objects[h] - case plumbing.BlobObject: - o.Blobs[h] = o.Objects[h] - case plumbing.TagObject: - o.Tags[h] = o.Objects[h] - default: - return h, ErrUnsupportedObjectType - } - - return h, nil -} - -func (o *ObjectStorage) HasEncodedObject(h plumbing.Hash) (err error) { - if _, ok := o.Objects[h]; !ok { - return plumbing.ErrObjectNotFound - } - return nil -} - -func (o *ObjectStorage) EncodedObjectSize(h plumbing.Hash) ( - size int64, err error) { - obj, ok := o.Objects[h] - if !ok { - return 0, plumbing.ErrObjectNotFound - } - - return obj.Size(), nil -} - -func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, ok := o.Objects[h] - if !ok || (plumbing.AnyObject != t && obj.Type() != t) { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { - var series []plumbing.EncodedObject - switch t { - case plumbing.AnyObject: - series = flattenObjectMap(o.Objects) - case plumbing.CommitObject: - series = flattenObjectMap(o.Commits) - case plumbing.TreeObject: - series = flattenObjectMap(o.Trees) - case plumbing.BlobObject: - series = flattenObjectMap(o.Blobs) - case plumbing.TagObject: - series = flattenObjectMap(o.Tags) - } - - return storer.NewEncodedObjectSliceIter(series), nil -} - -func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject { - objects := make([]plumbing.EncodedObject, 0, len(m)) 
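// The in-memory Storage above is the usual choice for ephemeral work; a
// minimal usage sketch (URL illustrative), cloning straight into memory:
//
//	st := memory.NewStorage()
//	repo, err := git.Clone(st, nil, &git.CloneOptions{
//		URL: "https://github.com/go-git/go-git",
//	})
//
// Passing a nil worktree filesystem makes the clone bare.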
- for _, obj := range m { - objects = append(objects, obj) - } - return objects -} - -func (o *ObjectStorage) Begin() storer.Transaction { - return &TxObjectStorage{ - Storage: o, - Objects: make(map[plumbing.Hash]plumbing.EncodedObject), - } -} - -func (o *ObjectStorage) ForEachObjectHash(fun func(plumbing.Hash) error) error { - for h := range o.Objects { - err := fun(h) - if err != nil { - if err == storer.ErrStop { - return nil - } - return err - } - } - return nil -} - -func (o *ObjectStorage) ObjectPacks() ([]plumbing.Hash, error) { - return nil, nil -} -func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error { - return nil -} - -var errNotSupported = fmt.Errorf("Not supported") - -func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) { - return time.Time{}, errNotSupported -} -func (o *ObjectStorage) DeleteLooseObject(plumbing.Hash) error { - return errNotSupported -} - -type TxObjectStorage struct { - Storage *ObjectStorage - Objects map[plumbing.Hash]plumbing.EncodedObject -} - -func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { - h := obj.Hash() - tx.Objects[h] = obj - - return h, nil -} - -func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { - obj, ok := tx.Objects[h] - if !ok || (plumbing.AnyObject != t && obj.Type() != t) { - return nil, plumbing.ErrObjectNotFound - } - - return obj, nil -} - -func (tx *TxObjectStorage) Commit() error { - for h, obj := range tx.Objects { - delete(tx.Objects, h) - if _, err := tx.Storage.SetEncodedObject(obj); err != nil { - return err - } - } - - return nil -} - -func (tx *TxObjectStorage) Rollback() error { - tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject) - return nil -} - -type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference - -func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error { - if ref != nil { - r[ref.Name()] = ref - } - - return nil -} - -func (r ReferenceStorage) CheckAndSetReference(ref, old *plumbing.Reference) error { - if ref == nil { - return nil - } - - if old != nil { - tmp := r[ref.Name()] - if tmp != nil && tmp.Hash() != old.Hash() { - return storage.ErrReferenceHasChanged - } - } - r[ref.Name()] = ref - return nil -} - -func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { - ref, ok := r[n] - if !ok { - return nil, plumbing.ErrReferenceNotFound - } - - return ref, nil -} - -func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { - var refs []*plumbing.Reference - for _, ref := range r { - refs = append(refs, ref) - } - - return storer.NewReferenceSliceIter(refs), nil -} - -func (r ReferenceStorage) CountLooseRefs() (int, error) { - return len(r), nil -} - -func (r ReferenceStorage) PackRefs() error { - return nil -} - -func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { - delete(r, n) - return nil -} - -type ShallowStorage []plumbing.Hash - -func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { - *s = commits - return nil -} - -func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) { - return s, nil -} - -type ModuleStorage map[string]*Storage - -func (s ModuleStorage) Module(name string) (storage.Storer, error) { - if m, ok := s[name]; ok { - return m, nil - } - - m := NewStorage() - s[name] = m - - return m, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/storer.go 
b/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/storer.go deleted file mode 100644 index 4800ac7ba07..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/storage/storer.go +++ /dev/null @@ -1,30 +0,0 @@ -package storage - -import ( - "errors" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing/storer" -) - -var ErrReferenceHasChanged = errors.New("reference has changed concurrently") - -// Storer is a generic storage of objects, references and any information -// related to a particular repository. The package github.com/go-git/go-git/v5/storage -// contains two implementation a filesystem base implementation (such as `.git`) -// and a memory implementations being ephemeral -type Storer interface { - storer.EncodedObjectStorer - storer.ReferenceStorer - storer.ShallowStorer - storer.IndexStorer - config.ConfigStorer - ModuleStorer -} - -// ModuleStorer allows interact with the modules' Storers -type ModuleStorer interface { - // Module returns a Storer representing a submodule, if not exists returns a - // new empty Storer is returned - Module(name string) (Storer, error) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/submodule.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/submodule.go deleted file mode 100644 index a202a9b6007..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/submodule.go +++ /dev/null @@ -1,397 +0,0 @@ -package git - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/url" - "path" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -var ( - ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized") - ErrSubmoduleNotInitialized = errors.New("submodule not initialized") -) - -// Submodule a submodule allows you to keep another Git repository in a -// subdirectory of your repository. -type Submodule struct { - // initialized defines if a submodule was already initialized. - initialized bool - - c *config.Submodule - w *Worktree -} - -// Config returns the submodule config -func (s *Submodule) Config() *config.Submodule { - return s.c -} - -// Init initialize the submodule reading the recorded Entry in the index for -// the given submodule -func (s *Submodule) Init() error { - cfg, err := s.w.r.Config() - if err != nil { - return err - } - - _, ok := cfg.Submodules[s.c.Name] - if ok { - return ErrSubmoduleAlreadyInitialized - } - - s.initialized = true - - cfg.Submodules[s.c.Name] = s.c - return s.w.r.Storer.SetConfig(cfg) -} - -// Status returns the status of the submodule. 
-func (s *Submodule) Status() (*SubmoduleStatus, error) { - idx, err := s.w.r.Storer.Index() - if err != nil { - return nil, err - } - - return s.status(idx) -} - -func (s *Submodule) status(idx *index.Index) (*SubmoduleStatus, error) { - status := &SubmoduleStatus{ - Path: s.c.Path, - } - - e, err := idx.Entry(s.c.Path) - if err != nil && err != index.ErrEntryNotFound { - return nil, err - } - - if e != nil { - status.Expected = e.Hash - } - - if !s.initialized { - return status, nil - } - - r, err := s.Repository() - if err != nil { - return nil, err - } - - head, err := r.Head() - if err == nil { - status.Current = head.Hash() - } - - if err != nil && err == plumbing.ErrReferenceNotFound { - err = nil - } - - return status, err -} - -// Repository returns the Repository represented by this submodule -func (s *Submodule) Repository() (*Repository, error) { - if !s.initialized { - return nil, ErrSubmoduleNotInitialized - } - - storer, err := s.w.r.Storer.Module(s.c.Name) - if err != nil { - return nil, err - } - - _, err = storer.Reference(plumbing.HEAD) - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - var exists bool - if err == nil { - exists = true - } - - var worktree billy.Filesystem - if worktree, err = s.w.Filesystem.Chroot(s.c.Path); err != nil { - return nil, err - } - - if exists { - return Open(storer, worktree) - } - - r, err := Init(storer, worktree) - if err != nil { - return nil, err - } - - moduleURL, err := url.Parse(s.c.URL) - if err != nil { - return nil, err - } - - if !path.IsAbs(moduleURL.Path) { - remotes, err := s.w.r.Remotes() - if err != nil { - return nil, err - } - - rootURL, err := url.Parse(remotes[0].c.URLs[0]) - if err != nil { - return nil, err - } - - rootURL.Path = path.Join(rootURL.Path, moduleURL.Path) - *moduleURL = *rootURL - } - - _, err = r.CreateRemote(&config.RemoteConfig{ - Name: DefaultRemoteName, - URLs: []string{moduleURL.String()}, - }) - - return r, err -} - -// Update the registered submodule to match what the superproject expects, the -// submodule should be initialized first calling the Init method or setting in -// the options SubmoduleUpdateOptions.Init equals true -func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { - return s.UpdateContext(context.Background(), o) -} - -// UpdateContext the registered submodule to match what the superproject -// expects, the submodule should be initialized first calling the Init method or -// setting in the options SubmoduleUpdateOptions.Init equals true. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. 
-func (s *Submodule) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { - return s.update(ctx, o, plumbing.ZeroHash) -} - -func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, forceHash plumbing.Hash) error { - if !s.initialized && !o.Init { - return ErrSubmoduleNotInitialized - } - - if !s.initialized && o.Init { - if err := s.Init(); err != nil { - return err - } - } - - idx, err := s.w.r.Storer.Index() - if err != nil { - return err - } - - hash := forceHash - if hash.IsZero() { - e, err := idx.Entry(s.c.Path) - if err != nil { - return err - } - - hash = e.Hash - } - - r, err := s.Repository() - if err != nil { - return err - } - - if err := s.fetchAndCheckout(ctx, r, o, hash); err != nil { - return err - } - - return s.doRecursiveUpdate(r, o) -} - -func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { - if o.RecurseSubmodules == NoRecurseSubmodules { - return nil - } - - w, err := r.Worktree() - if err != nil { - return err - } - - l, err := w.Submodules() - if err != nil { - return err - } - - new := &SubmoduleUpdateOptions{} - *new = *o - - new.RecurseSubmodules-- - return l.Update(new) -} - -func (s *Submodule) fetchAndCheckout( - ctx context.Context, r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash, -) error { - if !o.NoFetch { - err := r.FetchContext(ctx, &FetchOptions{Auth: o.Auth}) - if err != nil && err != NoErrAlreadyUpToDate { - return err - } - } - - w, err := r.Worktree() - if err != nil { - return err - } - - // Handle a case when submodule refers to an orphaned commit that's still reachable - // through Git server using a special protocol capability[1]. - // - // [1]: https://git-scm.com/docs/protocol-capabilities#_allow_reachable_sha1_in_want - if !o.NoFetch { - if _, err := w.r.Object(plumbing.AnyObject, hash); err != nil { - refSpec := config.RefSpec("+" + hash.String() + ":" + hash.String()) - - err := r.FetchContext(ctx, &FetchOptions{ - Auth: o.Auth, - RefSpecs: []config.RefSpec{refSpec}, - }) - if err != nil && err != NoErrAlreadyUpToDate && err != ErrExactSHA1NotSupported { - return err - } - } - } - - if err := w.Checkout(&CheckoutOptions{Hash: hash}); err != nil { - return err - } - - head := plumbing.NewHashReference(plumbing.HEAD, hash) - return r.Storer.SetReference(head) -} - -// Submodules list of several submodules from the same repository. -type Submodules []*Submodule - -// Init initializes the submodules in this list. -func (s Submodules) Init() error { - for _, sub := range s { - if err := sub.Init(); err != nil { - return err - } - } - - return nil -} - -// Update updates all the submodules in this list. -func (s Submodules) Update(o *SubmoduleUpdateOptions) error { - return s.UpdateContext(context.Background(), o) -} - -// UpdateContext updates all the submodules in this list. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. -func (s Submodules) UpdateContext(ctx context.Context, o *SubmoduleUpdateOptions) error { - for _, sub := range s { - if err := sub.UpdateContext(ctx, o); err != nil { - return err - } - } - - return nil -} - -// Status returns the status of the submodules. 
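From the worktree side, the types above compose as follows; a sketch with a placeholder repository path, initializing and updating every submodule recursively before printing their status:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	r, err := git.PlainOpen("/path/to/repo") // placeholder
	if err != nil {
		panic(err)
	}
	w, err := r.Worktree()
	if err != nil {
		panic(err)
	}

	subs, err := w.Submodules()
	if err != nil {
		panic(err)
	}

	// Init: true lets Update initialize uninitialized submodules first;
	// the recursion depth bounds doRecursiveUpdate above.
	err = subs.Update(&git.SubmoduleUpdateOptions{
		Init:              true,
		RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
	})
	if err != nil {
		panic(err)
	}

	status, err := subs.Status()
	if err != nil {
		panic(err)
	}
	fmt.Print(status) // same format as `git submodule status`
}
```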
-func (s Submodules) Status() (SubmodulesStatus, error) { - var list SubmodulesStatus - - var r *Repository - for _, sub := range s { - if r == nil { - r = sub.w.r - } - - idx, err := r.Storer.Index() - if err != nil { - return nil, err - } - - status, err := sub.status(idx) - if err != nil { - return nil, err - } - - list = append(list, status) - } - - return list, nil -} - -// SubmodulesStatus contains the status for all submodiles in the worktree -type SubmodulesStatus []*SubmoduleStatus - -// String is equivalent to `git submodule status` -func (s SubmodulesStatus) String() string { - buf := bytes.NewBuffer(nil) - for _, sub := range s { - fmt.Fprintln(buf, sub) - } - - return buf.String() -} - -// SubmoduleStatus contains the status for a submodule in the worktree -type SubmoduleStatus struct { - Path string - Current plumbing.Hash - Expected plumbing.Hash - Branch plumbing.ReferenceName -} - -// IsClean is the HEAD of the submodule is equals to the expected commit -func (s *SubmoduleStatus) IsClean() bool { - return s.Current == s.Expected -} - -// String is equivalent to `git submodule status ` -// -// This will print the SHA-1 of the currently checked out commit for a -// submodule, along with the submodule path and the output of git describe fo -// the SHA-1. Each SHA-1 will be prefixed with - if the submodule is not -// initialized, + if the currently checked out submodule commit does not match -// the SHA-1 found in the index of the containing repository. -func (s *SubmoduleStatus) String() string { - var extra string - var status = ' ' - - if s.Current.IsZero() { - status = '-' - } else if !s.IsClean() { - status = '+' - } - - if len(s.Branch) != 0 { - extra = string(s.Branch[5:]) - } else if !s.Current.IsZero() { - extra = s.Current.String()[:7] - } - - if extra != "" { - extra = fmt.Sprintf(" (%s)", extra) - } - - return fmt.Sprintf("%c%s %s%s", status, s.Expected, s.Path, extra) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/read.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/read.go deleted file mode 100644 index a14d48db9c7..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/read.go +++ /dev/null @@ -1,180 +0,0 @@ -// Package binary implements sintax-sugar functions on top of the standard -// library binary package -package binary - -import ( - "bufio" - "encoding/binary" - "io" - - "github.com/go-git/go-git/v5/plumbing" -) - -// Read reads structured binary data from r into data. Bytes are read and -// decoded in BigEndian order -// https://golang.org/pkg/encoding/binary/#Read -func Read(r io.Reader, data ...interface{}) error { - for _, v := range data { - if err := binary.Read(r, binary.BigEndian, v); err != nil { - return err - } - } - - return nil -} - -// ReadUntil reads from r untin delim is found -func ReadUntil(r io.Reader, delim byte) ([]byte, error) { - if bufr, ok := r.(*bufio.Reader); ok { - return ReadUntilFromBufioReader(bufr, delim) - } - - var buf [1]byte - value := make([]byte, 0, 16) - for { - if _, err := io.ReadFull(r, buf[:]); err != nil { - if err == io.EOF { - return nil, err - } - - return nil, err - } - - if buf[0] == delim { - return value, nil - } - - value = append(value, buf[0]) - } -} - -// ReadUntilFromBufioReader is like bufio.ReadBytes but drops the delimiter -// from the result. 
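These helpers decode the big-endian integers that pack and index files are built from. A sketch decoding a synthetic packfile header ("PACK", version 2, five objects) with the variadic Read above:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/go-git/go-git/v5/utils/binary"
)

func main() {
	// 4-byte magic, then two big-endian uint32s: version and object count.
	hdr := bytes.NewReader([]byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 5})

	var magic [4]byte
	var version, objects uint32
	if err := binary.Read(hdr, &magic, &version, &objects); err != nil {
		panic(err)
	}
	fmt.Printf("%s v%d, %d objects\n", string(magic[:]), version, objects)
}
```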
-func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) { - value, err := r.ReadBytes(delim) - if err != nil || len(value) == 0 { - return nil, err - } - - return value[:len(value)-1], nil -} - -// ReadVariableWidthInt reads and returns an int in Git VLQ special format: -// -// Ordinary VLQ has some redundancies, example: the number 358 can be -// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the -// 4-octet VLQ 0x80808166 and so forth. -// -// To avoid these redundancies, the VLQ format used in Git removes this -// prepending redundancy and extends the representable range of shorter -// VLQs by adding an offset to VLQs of 2 or more octets in such a way -// that the lowest possible value for such an (N+1)-octet VLQ becomes -// exactly one more than the maximum possible value for an N-octet VLQ. -// In particular, since a 1-octet VLQ can store a maximum value of 127, -// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of -// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is -// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ -// (0x808000) has a value of 16512 instead of zero, which means -// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of -// just 2097151. And so forth. -// -// This is how the offset is saved in C: -// -// dheader[pos] = ofs & 127; -// while (ofs >>= 7) -// dheader[--pos] = 128 | (--ofs & 127); -// -func ReadVariableWidthInt(r io.Reader) (int64, error) { - var c byte - if err := Read(r, &c); err != nil { - return 0, err - } - - var v = int64(c & maskLength) - for c&maskContinue > 0 { - v++ - if err := Read(r, &c); err != nil { - return 0, err - } - - v = (v << lengthBits) + int64(c&maskLength) - } - - return v, nil -} - -const ( - maskContinue = uint8(128) // 1000 000 - maskLength = uint8(127) // 0111 1111 - lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length -) - -// ReadUint64 reads 8 bytes and returns them as a BigEndian uint32 -func ReadUint64(r io.Reader) (uint64, error) { - var v uint64 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadUint32 reads 4 bytes and returns them as a BigEndian uint32 -func ReadUint32(r io.Reader) (uint32, error) { - var v uint32 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16 -func ReadUint16(r io.Reader) (uint16, error) { - var v uint16 - if err := binary.Read(r, binary.BigEndian, &v); err != nil { - return 0, err - } - - return v, nil -} - -// ReadHash reads a plumbing.Hash from r -func ReadHash(r io.Reader) (plumbing.Hash, error) { - var h plumbing.Hash - if err := binary.Read(r, binary.BigEndian, h[:]); err != nil { - return plumbing.ZeroHash, err - } - - return h, nil -} - -const sniffLen = 8000 - -// IsBinary detects if data is a binary value based on: -// http://git.kernel.org/cgit/git/git.git/tree/xdiff-interface.c?id=HEAD#n198 -func IsBinary(r io.Reader) (bool, error) { - reader := bufio.NewReader(r) - c := 0 - for { - if c == sniffLen { - break - } - - b, err := reader.ReadByte() - if err == io.EOF { - break - } - if err != nil { - return false, err - } - - if b == byte(0) { - return true, nil - } - - c++ - } - - return false, nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/write.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/write.go deleted file mode 100644 index 
c08c73a06b2..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/binary/write.go +++ /dev/null @@ -1,50 +0,0 @@ -package binary - -import ( - "encoding/binary" - "io" -) - -// Write writes the binary representation of data into w, using BigEndian order -// https://golang.org/pkg/encoding/binary/#Write -func Write(w io.Writer, data ...interface{}) error { - for _, v := range data { - if err := binary.Write(w, binary.BigEndian, v); err != nil { - return err - } - } - - return nil -} - -func WriteVariableWidthInt(w io.Writer, n int64) error { - buf := []byte{byte(n & 0x7f)} - n >>= 7 - for n != 0 { - n-- - buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...) - n >>= 7 - } - - _, err := w.Write(buf) - - return err -} - -// WriteUint64 writes the binary representation of a uint64 into w, in BigEndian -// order -func WriteUint64(w io.Writer, value uint64) error { - return binary.Write(w, binary.BigEndian, value) -} - -// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian -// order -func WriteUint32(w io.Writer, value uint32) error { - return binary.Write(w, binary.BigEndian, value) -} - -// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian -// order -func WriteUint16(w io.Writer, value uint16) error { - return binary.Write(w, binary.BigEndian, value) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go deleted file mode 100644 index 70054949fd9..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/diff/diff.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diff implements line oriented diffs, similar to the ancient -// Unix diff command. -// -// The current implementation is just a wrapper around Sergi's -// go-diff/diffmatchpatch library, which is a go port of Neil -// Fraser's google-diff-match-patch code -package diff - -import ( - "bytes" - "time" - - "github.com/sergi/go-diff/diffmatchpatch" -) - -// Do computes the (line oriented) modifications needed to turn the src -// string into the dst string. The underlying algorithm is Meyers, -// its complexity is O(N*d) where N is min(lines(src), lines(dst)) and d -// is the size of the diff. -func Do(src, dst string) (diffs []diffmatchpatch.Diff) { - // the default timeout is time.Second which may be too small under heavy load - return DoWithTimeout(src, dst, time.Hour) -} - -// DoWithTimeout computes the (line oriented) modifications needed to turn the src -// string into the dst string. The `timeout` argument specifies the maximum -// amount of time it is allowed to spend in this function. If the timeout -// is exceeded, the parts of the strings which were not considered are turned into -// a bulk delete+insert and the half-baked suboptimal result is returned at once. -// The underlying algorithm is Meyers, its complexity is O(N*d) where N is -// min(lines(src), lines(dst)) and d is the size of the diff. -func DoWithTimeout(src, dst string, timeout time.Duration) (diffs []diffmatchpatch.Diff) { - dmp := diffmatchpatch.New() - dmp.DiffTimeout = timeout - wSrc, wDst, warray := dmp.DiffLinesToRunes(src, dst) - diffs = dmp.DiffMainRunes(wSrc, wDst, false) - diffs = dmp.DiffCharsToLines(diffs, warray) - return diffs -} - -// Dst computes and returns the destination text. 
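A round-trip sketch of the offset VLQ described above, mirroring ReadVariableWidthInt and its counterpart WriteVariableWidthInt: seven value bits per byte, the high bit as a continuation flag, and the ++/-- offset that gives every value exactly one encoding. The boundary values match the table in the comment (127 is the largest 1-octet VLQ, 16511 the largest 2-octet one):

```go
package main

import "fmt"

// writeVLQ mirrors WriteVariableWidthInt above.
func writeVLQ(n int64) []byte {
	buf := []byte{byte(n & 0x7f)}
	n >>= 7
	for n != 0 {
		n-- // the anti-redundancy offset
		buf = append([]byte{0x80 | byte(n&0x7f)}, buf...)
		n >>= 7
	}
	return buf
}

// readVLQ mirrors ReadVariableWidthInt above.
func readVLQ(buf []byte) int64 {
	c := buf[0]
	buf = buf[1:]
	v := int64(c & 0x7f)
	for c&0x80 != 0 {
		v++ // undo the offset applied while encoding
		c = buf[0]
		buf = buf[1:]
		v = (v << 7) + int64(c&0x7f)
	}
	return v
}

func main() {
	for _, n := range []int64{0, 127, 128, 16511, 16512} {
		enc := writeVLQ(n)
		fmt.Printf("%6d -> %x -> %d\n", n, enc, readVLQ(enc))
	}
	// 127 -> 7f, 128 -> 8000, 16511 -> ff7f, 16512 -> 808000
}
```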
-func Dst(diffs []diffmatchpatch.Diff) string { - var text bytes.Buffer - for _, d := range diffs { - if d.Type != diffmatchpatch.DiffDelete { - text.WriteString(d.Text) - } - } - return text.String() -} - -// Src computes and returns the source text -func Src(diffs []diffmatchpatch.Diff) string { - var text bytes.Buffer - for _, d := range diffs { - if d.Type != diffmatchpatch.DiffInsert { - text.WriteString(d.Text) - } - } - return text.String() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go deleted file mode 100644 index b52e85a380b..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go +++ /dev/null @@ -1,180 +0,0 @@ -// Package ioutil implements some I/O utility functions. -package ioutil - -import ( - "bufio" - "context" - "errors" - "io" - - ctxio "github.com/jbenet/go-context/io" -) - -type readPeeker interface { - io.Reader - Peek(int) ([]byte, error) -} - -var ( - ErrEmptyReader = errors.New("reader is empty") -) - -// NonEmptyReader takes a reader and returns it if it is not empty, or -// `ErrEmptyReader` if it is empty. If there is an error when reading the first -// byte of the given reader, it will be propagated. -func NonEmptyReader(r io.Reader) (io.Reader, error) { - pr, ok := r.(readPeeker) - if !ok { - pr = bufio.NewReader(r) - } - - _, err := pr.Peek(1) - if err == io.EOF { - return nil, ErrEmptyReader - } - - if err != nil { - return nil, err - } - - return pr, nil -} - -type readCloser struct { - io.Reader - closer io.Closer -} - -func (r *readCloser) Close() error { - return r.closer.Close() -} - -// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and -// `io.Closer`. -func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { - return &readCloser{Reader: r, closer: c} -} - -type writeCloser struct { - io.Writer - closer io.Closer -} - -func (r *writeCloser) Close() error { - return r.closer.Close() -} - -// NewWriteCloser creates an `io.WriteCloser` with the given `io.Writer` and -// `io.Closer`. -func NewWriteCloser(w io.Writer, c io.Closer) io.WriteCloser { - return &writeCloser{Writer: w, closer: c} -} - -type writeNopCloser struct { - io.Writer -} - -func (writeNopCloser) Close() error { return nil } - -// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping -// the provided Writer w. -func WriteNopCloser(w io.Writer) io.WriteCloser { - return writeNopCloser{w} -} - -// CheckClose calls Close on the given io.Closer. If the given *error points to -// nil, it will be assigned the error returned by Close. Otherwise, any error -// returned by Close will be ignored. CheckClose is usually called with defer. -func CheckClose(c io.Closer, err *error) { - if cerr := c.Close(); cerr != nil && *err == nil { - *err = cerr - } -} - -// NewContextWriter wraps a writer to make it respect given Context. -// If there is a blocking write, the returned Writer will return whenever the -// context is cancelled (the return values are n=0 and err=ctx.Err()). -func NewContextWriter(ctx context.Context, w io.Writer) io.Writer { - return ctxio.NewWriter(ctx, w) -} - -// NewContextReader wraps a reader to make it respect given Context. -// If there is a blocking read, the returned Reader will return whenever the -// context is cancelled (the return values are n=0 and err=ctx.Err()). 
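CheckClose above exists for one idiom: deferring Close while still surfacing its error through a named return value, without masking an earlier failure. A standalone sketch of the pattern (the file path is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// checkClose mirrors ioutil.CheckClose above: it promotes a Close error into
// the caller's named return error, but only if nothing failed earlier.
func checkClose(c io.Closer, err *error) {
	if cerr := c.Close(); cerr != nil && *err == nil {
		*err = cerr
	}
}

// firstBytes shows the idiom: the deferred checkClose can still set err
// after the body has returned nil.
func firstBytes(path string) (s string, err error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer checkClose(f, &err)

	buf := make([]byte, 64)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		return "", err
	}
	return string(buf[:n]), nil
}

func main() {
	s, err := firstBytes("/etc/hostname") // placeholder path
	fmt.Println(s, err)
}
```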
-func NewContextReader(ctx context.Context, r io.Reader) io.Reader { - return ctxio.NewReader(ctx, r) -} - -// NewContextWriteCloser as NewContextWriter but with io.Closer interface. -func NewContextWriteCloser(ctx context.Context, w io.WriteCloser) io.WriteCloser { - ctxw := ctxio.NewWriter(ctx, w) - return NewWriteCloser(ctxw, w) -} - -// NewContextReadCloser as NewContextReader but with io.Closer interface. -func NewContextReadCloser(ctx context.Context, r io.ReadCloser) io.ReadCloser { - ctxr := ctxio.NewReader(ctx, r) - return NewReadCloser(ctxr, r) -} - -type readerOnError struct { - io.Reader - notify func(error) -} - -// NewReaderOnError returns a io.Reader that call the notify function when an -// unexpected (!io.EOF) error happens, after call Read function. -func NewReaderOnError(r io.Reader, notify func(error)) io.Reader { - return &readerOnError{r, notify} -} - -// NewReadCloserOnError returns a io.ReadCloser that call the notify function -// when an unexpected (!io.EOF) error happens, after call Read function. -func NewReadCloserOnError(r io.ReadCloser, notify func(error)) io.ReadCloser { - return NewReadCloser(NewReaderOnError(r, notify), r) -} - -func (r *readerOnError) Read(buf []byte) (n int, err error) { - n, err = r.Reader.Read(buf) - if err != nil && err != io.EOF { - r.notify(err) - } - - return -} - -type writerOnError struct { - io.Writer - notify func(error) -} - -// NewWriterOnError returns a io.Writer that call the notify function when an -// unexpected (!io.EOF) error happens, after call Write function. -func NewWriterOnError(w io.Writer, notify func(error)) io.Writer { - return &writerOnError{w, notify} -} - -// NewWriteCloserOnError returns a io.WriteCloser that call the notify function -//when an unexpected (!io.EOF) error happens, after call Write function. 
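Per the doc comments above, the context wrappers unblock a stalled Read or Write once the context ends. A sketch using an io.Pipe whose read side blocks until the deadline fires:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"time"

	gioutil "github.com/go-git/go-git/v5/utils/ioutil"
)

func main() {
	pr, _ := io.Pipe() // Read blocks: nothing is ever written

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// NewContextReader returns from the pending Read when ctx expires,
	// with n=0 and err=ctx.Err().
	r := gioutil.NewContextReader(ctx, pr)
	buf := make([]byte, 1)
	_, err := r.Read(buf)
	fmt.Println(err) // context deadline exceeded
}
```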
-func NewWriteCloserOnError(w io.WriteCloser, notify func(error)) io.WriteCloser { - return NewWriteCloser(NewWriterOnError(w, notify), w) -} - -func (r *writerOnError) Write(p []byte) (n int, err error) { - n, err = r.Writer.Write(p) - if err != nil && err != io.EOF { - r.notify(err) - } - - return -} - -type PipeReader interface { - io.ReadCloser - CloseWithError(err error) error -} - -type PipeWriter interface { - io.WriteCloser - CloseWithError(err error) error -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go deleted file mode 100644 index f30c452fa4e..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !js - -package ioutil - -import "io" - -func Pipe() (PipeReader, PipeWriter) { - return io.Pipe() -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go deleted file mode 100644 index cf102e6ef83..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build js - -package ioutil - -import "github.com/acomagu/bufpipe" - -func Pipe() (PipeReader, PipeWriter) { - return bufpipe.New(nil) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go deleted file mode 100644 index cc6dc890716..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go +++ /dev/null @@ -1,149 +0,0 @@ -package merkletrie - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// Action values represent the kind of things a Change can represent: -// insertion, deletions or modifications of files. -type Action int - -// The set of possible actions in a change. -const ( - _ Action = iota - Insert - Delete - Modify -) - -// String returns the action as a human readable text. -func (a Action) String() string { - switch a { - case Insert: - return "Insert" - case Delete: - return "Delete" - case Modify: - return "Modify" - default: - panic(fmt.Sprintf("unsupported action: %d", a)) - } -} - -// A Change value represent how a noder has change between to merkletries. -type Change struct { - // The noder before the change or nil if it was inserted. - From noder.Path - // The noder after the change or nil if it was deleted. - To noder.Path -} - -// Action is convenience method that returns what Action c represents. -func (c *Change) Action() (Action, error) { - if c.From == nil && c.To == nil { - return Action(0), fmt.Errorf("malformed change: nil from and to") - } - if c.From == nil { - return Insert, nil - } - if c.To == nil { - return Delete, nil - } - - return Modify, nil -} - -// NewInsert returns a new Change representing the insertion of n. -func NewInsert(n noder.Path) Change { return Change{To: n} } - -// NewDelete returns a new Change representing the deletion of n. -func NewDelete(n noder.Path) Change { return Change{From: n} } - -// NewModify returns a new Change representing that a has been modified and -// it is now b. -func NewModify(a, b noder.Path) Change { - return Change{ - From: a, - To: b, - } -} - -// String returns a single change in human readable form, using the -// format: '<' + action + space + path + '>'. The contents of the file -// before or after the change are not included in this format. 
-// -// Example: inserting a file at the path a/b/c.txt will return "". -func (c Change) String() string { - action, err := c.Action() - if err != nil { - panic(err) - } - - var path string - if action == Delete { - path = c.From.String() - } else { - path = c.To.String() - } - - return fmt.Sprintf("<%s %s>", action, path) -} - -// Changes is a list of changes between to merkletries. -type Changes []Change - -// NewChanges returns an empty list of changes. -func NewChanges() Changes { - return Changes{} -} - -// Add adds the change c to the list of changes. -func (l *Changes) Add(c Change) { - *l = append(*l, c) -} - -// AddRecursiveInsert adds the required changes to insert all the -// file-like noders found in root, recursively. -func (l *Changes) AddRecursiveInsert(root noder.Path) error { - return l.addRecursive(root, NewInsert) -} - -// AddRecursiveDelete adds the required changes to delete all the -// file-like noders found in root, recursively. -func (l *Changes) AddRecursiveDelete(root noder.Path) error { - return l.addRecursive(root, NewDelete) -} - -type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete - -func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error { - if !root.IsDir() { - l.Add(ctor(root)) - return nil - } - - i, err := NewIterFromPath(root) - if err != nil { - return err - } - - var current noder.Path - for { - if current, err = i.Step(); err != nil { - if err == io.EOF { - break - } - return err - } - if current.IsDir() { - continue - } - l.Add(ctor(current)) - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go deleted file mode 100644 index bd084b2ab03..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go +++ /dev/null @@ -1,428 +0,0 @@ -package merkletrie - -// The focus of this difftree implementation is to save time by -// skipping whole directories if their hash is the same in both -// trees. -// -// The diff algorithm implemented here is based on the doubleiter -// type defined in this same package; we will iterate over both -// trees at the same time, while comparing the current noders in -// each iterator. Depending on how they differ we will output the -// corresponding changes and move the iterators further over both -// trees. -// -// The table bellow show all the possible comparison results, along -// with what changes should we produce and how to advance the -// iterators. -// -// The table is implemented by the switches in this function, -// diffTwoNodes, diffTwoNodesSameName and diffTwoDirs. -// -// Many Bothans died to bring us this information, make sure you -// understand the table before modifying this code. - -// # Cases -// -// When comparing noders in both trees you will find yourself in -// one of 169 possible cases, but if we ignore moves, we can -// simplify a lot the search space into the following table: -// -// - "-": nothing, no file or directory -// - a<>: an empty file named "a". -// - a<1>: a file named "a", with "1" as its contents. -// - a<2>: a file named "a", with "2" as its contents. -// - a(): an empty dir named "a". -// - a(...): a dir named "a", with some files and/or dirs inside (possibly -// empty). -// - a(;;;): a dir named "a", with some other files and/or dirs inside -// (possibly empty), which different from the ones in "a(...)". -// -// \ to - a<> a<1> a<2> a() a(...) 
a(;;;) -// from \ -// - 00 01 02 03 04 05 06 -// a<> 10 11 12 13 14 15 16 -// a<1> 20 21 22 23 24 25 26 -// a<2> 30 31 32 33 34 35 36 -// a() 40 41 42 43 44 45 46 -// a(...) 50 51 52 53 54 55 56 -// a(;;;) 60 61 62 63 64 65 66 -// -// Every (from, to) combination in the table is a special case, but -// some of them can be merged into some more general cases, for -// instance 11 and 22 can be merged into the general case: both -// noders are equal. -// -// Here is a full list of all the cases that are similar and how to -// merge them together into more general cases. Each general case -// is labeled with an uppercase letter for further reference, and it -// is followed by the pseudocode of the checks you have to perfrom -// on both noders to see if you are in such a case, the actions to -// perform (i.e. what changes to output) and how to advance the -// iterators of each tree to continue the comparison process. -// -// ## A. Impossible: 00 -// -// ## B. Same thing on both sides: 11, 22, 33, 44, 55, 66 -// - check: `SameName() && SameHash()` -// - action: do nothing. -// - advance: `FromNext(); ToNext()` -// -// ### C. To was created: 01, 02, 03, 04, 05, 06 -// - check: `DifferentName() && ToBeforeFrom()` -// - action: insertRecursively(to) -// - advance: `ToNext()` -// -// ### D. From was deleted: 10, 20, 30, 40, 50, 60 -// - check: `DifferentName() && FromBeforeTo()` -// - action: `DeleteRecursively(from)` -// - advance: `FromNext()` -// -// ### E. Empty file to file with contents: 12, 13 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// ToIsFile() && FromIsEmpty()` -// - action: `modifyFile(from, to)` -// - advance: `FromNext()` or `FromStep()` -// -// ### E'. file with contents to empty file: 21, 31 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// ToIsFile() && ToIsEmpty()` -// - action: `modifyFile(from, to)` -// - advance: `FromNext()` or `FromStep()` -// -// ### F. empty file to empty dir with the same name: 14 -// - check: `SameName() && FromIsFile() && FromIsEmpty() && -// ToIsDir() && ToIsEmpty()` -// - action: `DeleteFile(from); InsertEmptyDir(to)` -// - advance: `FromNext(); ToNext()` -// -// ### F'. empty dir to empty file of the same name: 41 -// - check: `SameName() && FromIsDir() && FromIsEmpty && -// ToIsFile() && ToIsEmpty()` -// - action: `DeleteEmptyDir(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` or step for any of them. -// -// ### G. empty file to non-empty dir of the same name: 15, 16 -// - check: `SameName() && FromIsFile() && ToIsDir() && -// FromIsEmpty() && ToIsNotEmpty()` -// - action: `DeleteFile(from); InsertDirRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### G'. non-empty dir to empty file of the same name: 51, 61 -// - check: `SameName() && FromIsDir() && FromIsNotEmpty() && -// ToIsFile() && FromIsEmpty()` -// - action: `DeleteDirRecursively(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### H. modify file contents: 23, 32 -// - check: `SameName() && FromIsFile() && ToIsFile() && -// FromIsNotEmpty() && ToIsNotEmpty()` -// - action: `ModifyFile(from, to)` -// - advance: `FromNext(); ToNext()` -// -// ### I. file with contents to empty dir: 24, 34 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// FromIsNotEmpty() && ToIsDir() && ToIsEmpty()` -// - action: `DeleteFile(from); InsertEmptyDir(to)` -// - advance: `FromNext(); ToNext()` -// -// ### I'. 
empty dir to file with contents: 42, 43 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsFile() && ToIsEmpty()` -// - action: `DeleteDir(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### J. file with contents to dir with contents: 25, 26, 35, 36 -// - check: `SameName() && DifferentHash() && FromIsFile() && -// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `DeleteFile(from); InsertDirRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### J'. dir with contents to file with contents: 52, 62, 53, 63 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()` -// - action: `DeleteDirRecursively(from); InsertFile(to)` -// - advance: `FromNext(); ToNext()` -// -// ### K. empty dir to dir with contents: 45, 46 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `InsertChildrenRecursively(to)` -// - advance: `FromNext(); ToNext()` -// -// ### K'. dir with contents to empty dir: 54, 64 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: `DeleteChildrenRecursively(from)` -// - advance: `FromNext(); ToNext()` -// -// ### L. dir with contents to dir with different contents: 56, 65 -// - check: `SameName() && DifferentHash() && FromIsDir() && -// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` -// - action: nothing -// - advance: `FromStep(); ToStep()` -// -// - -// All these cases can be further simplified by a truth table -// reduction process, in which we gather similar checks together to -// make the final code easier to read and understand. -// -// The first 6 columns are the outputs of the checks to perform on -// both noders. I have labeled them 1 to 6, this is what they mean: -// -// 1: SameName() -// 2: SameHash() -// 3: FromIsDir() -// 4: ToIsDir() -// 5: FromIsEmpty() -// 6: ToIsEmpty() -// -// The from and to columns are a fsnoder example of the elements -// that you will find on each tree under the specified comparison -// results (columns 1 to 6). -// -// The type column identifies the case we are into, from the list above. -// -// The type' column identifies the new set of reduced cases, using -// lowercase letters, and they are explained after the table. -// -// The last column is the set of actions and advances for each case. -// -// "---" means impossible except in case of hash collision. -// -// advance meaning: -// - NN: from.Next(); to.Next() -// - SS: from.Step(); to.Step() -// -// 1 2 3 4 5 6 | from | to |type|type'|action ; advance -// ------------+--------+--------+----+------------------------------------ -// 0 0 0 0 0 0 | | | | | if !SameName() { -// . | | | | | if FromBeforeTo() { -// . | | | D | d | delete(from); from.Next() -// . | | | | | } else { -// . | | | C | c | insert(to); to.Next() -// . | | | | | } -// 0 1 1 1 1 1 | | | | | } -// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN -// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN -// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN -// 1 0 0 0 1 1 | ---- | ---- | | e | -// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN -// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN -// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN -// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN -// 1 0 1 0 0 0 | a(...) 
| a<1> | J' | f | delete(from); insert(to); NN -// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN -// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN -// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN -// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS -// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChildren(from); NN -// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN -// 1 0 1 1 1 1 | ---- | ---- | | | -// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN -// 1 1 0 0 0 1 | ---- | ---- | | b | -// 1 1 0 0 1 0 | ---- | ---- | | b | -// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN -// 1 1 0 1 0 0 | ---- | ---- | | b | -// 1 1 0 1 0 1 | ---- | ---- | | b | -// 1 1 0 1 1 0 | ---- | ---- | | b | -// 1 1 0 1 1 1 | ---- | ---- | | b | -// 1 1 1 0 0 0 | ---- | ---- | | b | -// 1 1 1 0 0 1 | ---- | ---- | | b | -// 1 1 1 0 1 0 | ---- | ---- | | b | -// 1 1 1 0 1 1 | ---- | ---- | | b | -// 1 1 1 1 0 0 | a(...) | a(...) | B | b | nothing; NN -// 1 1 1 1 0 1 | ---- | ---- | | b | -// 1 1 1 1 1 0 | ---- | ---- | | b | -// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN -// -// c and d: -// if !SameName() -// d if FromBeforeTo() -// c else -// b: SameName) && sameHash() -// e: SameName() && !sameHash() && BothAreFiles() -// f: SameName() && !sameHash() && FileAndDir() -// g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty -// i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty -// h: else of i - -import ( - "context" - "errors" - "fmt" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -var ( - // ErrCanceled is returned whenever the operation is canceled. - ErrCanceled = errors.New("operation canceled") -) - -// DiffTree calculates the list of changes between two merkletries. It -// uses the provided hashEqual callback to compare noders. -func DiffTree( - fromTree, - toTree noder.Noder, - hashEqual noder.Equal, -) (Changes, error) { - return DiffTreeContext(context.Background(), fromTree, toTree, hashEqual) -} - -// DiffTreeContext calculates the list of changes between two merkletries. It -// uses the provided hashEqual callback to compare noders. 
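To make the preceding case table concrete, here is a minimal sketch of a call site (editor's illustration, not part of the deleted file; `fromTree` and `toTree` stand for any noder.Noder implementations, such as the filesystem and index nodes deleted later in this diff; imports assumed: bytes, context, fmt, merkletrie, merkletrie/noder):

// Hashes are compared directly, so unchanged directories are pruned
// exactly as the table above describes.
hashEqual := func(a, b noder.Hasher) bool {
	return bytes.Equal(a.Hash(), b.Hash())
}
changes, err := merkletrie.DiffTreeContext(context.Background(), fromTree, toTree, hashEqual)
if err != nil {
	return err
}
for _, ch := range changes {
	fmt.Println(ch) // e.g. <Insert a/b/c.txt>
}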
-// Error will be returned if context expires -// Provided context must be non nil -func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder, - hashEqual noder.Equal) (Changes, error) { - ret := NewChanges() - - ii, err := newDoubleIter(fromTree, toTree, hashEqual) - if err != nil { - return nil, err - } - - for { - select { - case <-ctx.Done(): - return nil, ErrCanceled - default: - } - - from := ii.from.current - to := ii.to.current - - switch r := ii.remaining(); r { - case noMoreNoders: - return ret, nil - case onlyFromRemains: - if err = ret.AddRecursiveDelete(from); err != nil { - return nil, err - } - if err = ii.nextFrom(); err != nil { - return nil, err - } - case onlyToRemains: - if err = ret.AddRecursiveInsert(to); err != nil { - return nil, err - } - if err = ii.nextTo(); err != nil { - return nil, err - } - case bothHaveNodes: - if err = diffNodes(&ret, ii); err != nil { - return nil, err - } - default: - panic(fmt.Sprintf("unknown remaining value: %d", r)) - } - } -} - -func diffNodes(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - var err error - - // compare their full paths as strings - switch from.Compare(to) { - case -1: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = ii.nextFrom(); err != nil { - return err - } - case 1: - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextTo(); err != nil { - return err - } - default: - if err := diffNodesSameName(changes, ii); err != nil { - return err - } - } - - return nil -} - -func diffNodesSameName(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - - status, err := ii.compare() - if err != nil { - return err - } - - switch { - case status.sameHash: - // do nothing - if err = ii.nextBoth(); err != nil { - return err - } - case status.bothAreFiles: - changes.Add(NewModify(from, to)) - if err = ii.nextBoth(); err != nil { - return err - } - case status.fileAndDir: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case status.bothAreDirs: - if err = diffDirs(changes, ii); err != nil { - return err - } - default: - return fmt.Errorf("bad status from double iterator") - } - - return nil -} - -func diffDirs(changes *Changes, ii *doubleIter) error { - from := ii.from.current - to := ii.to.current - - status, err := ii.compare() - if err != nil { - return err - } - - switch { - case status.fromIsEmptyDir: - if err = changes.AddRecursiveInsert(to); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case status.toIsEmptyDir: - if err = changes.AddRecursiveDelete(from); err != nil { - return err - } - if err = ii.nextBoth(); err != nil { - return err - } - case !status.fromIsEmptyDir && !status.toIsEmptyDir: - // do nothing - if err = ii.stepBoth(); err != nil { - return err - } - default: - return fmt.Errorf("both dirs are empty but has different hash") - } - - return nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go deleted file mode 100644 index 5204024ad4f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Package merkletrie provides support for n-ary trees that are at the same -time Merkle trees and 
Radix trees (tries). - -Git trees are Radix n-ary trees in virtue of the names of their -tree entries. At the same time, git trees are Merkle trees thanks to -their hashes. - -This package defines Merkle tries as nodes that should have: - -- a hash: the Merkle part of the Merkle trie - -- a key: the Radix part of the Merkle trie - -The Merkle hash condition is not enforced by this package though. This -means that the hash of a node doesn't have to take into account the hashes of -their children, which is good for testing purposes. - -Nodes in the Merkle trie are abstracted by the Noder interface. The -intended use is that git trees implements this interface, either -directly or using a simple wrapper. - -This package provides an iterator for merkletries that can skip whole -directory-like noders and an efficient merkletrie comparison algorithm. - -When comparing git trees, the simple approach of alphabetically sorting -their elements and comparing the resulting lists is too slow as it -depends linearly on the number of files in the trees: When a directory -has lots of files but none of them has been modified, this approach is -very expensive. We can do better by prunning whole directories that -have not change, just by looking at their hashes. This package provides -the tools to do exactly that. -*/ -package merkletrie diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go deleted file mode 100644 index 4a4341b3875..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go +++ /dev/null @@ -1,187 +0,0 @@ -package merkletrie - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// A doubleIter is a convenience type to keep track of the current -// noders in two merkletries that are going to be iterated in parallel. -// It has methods for: -// -// - iterating over the merkletries, both at the same time or -// individually: nextFrom, nextTo, nextBoth, stepBoth -// -// - checking if there are noders left in one or both of them with the -// remaining method and its associated returned type. -// -// - comparing the current noders of both merkletries in several ways, -// with the compare method and its associated returned type. -type doubleIter struct { - from struct { - iter *Iter - current noder.Path // nil if no more nodes - } - to struct { - iter *Iter - current noder.Path // nil if no more nodes - } - hashEqual noder.Equal -} - -// NewdoubleIter returns a new doubleIter for the merkletries "from" and -// "to". The hashEqual callback function will be used by the doubleIter -// to compare the hash of the noders in the merkletries. The doubleIter -// will be initialized to the first elements in each merkletrie if any. 
-func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) ( - *doubleIter, error) { - var ii doubleIter - var err error - - if ii.from.iter, err = NewIter(from); err != nil { - return nil, fmt.Errorf("from: %s", err) - } - if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil { - return nil, fmt.Errorf("from: %s", err) - } - - if ii.to.iter, err = NewIter(to); err != nil { - return nil, fmt.Errorf("to: %s", err) - } - if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil { - return nil, fmt.Errorf("to: %s", err) - } - - ii.hashEqual = hashEqual - - return &ii, nil -} - -func turnEOFIntoNil(e error) error { - if e != nil && e != io.EOF { - return e - } - return nil -} - -// NextBoth makes d advance to the next noder in both merkletries. If -// any of them is a directory, it skips its contents. -func (d *doubleIter) nextBoth() error { - if err := d.nextFrom(); err != nil { - return err - } - if err := d.nextTo(); err != nil { - return err - } - - return nil -} - -// NextFrom makes d advance to the next noder in the "from" merkletrie, -// skipping its contents if it is a directory. -func (d *doubleIter) nextFrom() (err error) { - d.from.current, err = d.from.iter.Next() - return turnEOFIntoNil(err) -} - -// NextTo makes d advance to the next noder in the "to" merkletrie, -// skipping its contents if it is a directory. -func (d *doubleIter) nextTo() (err error) { - d.to.current, err = d.to.iter.Next() - return turnEOFIntoNil(err) -} - -// StepBoth makes d advance to the next noder in both merkletries, -// getting deeper into directories if that is the case. -func (d *doubleIter) stepBoth() (err error) { - if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil { - return err - } - if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil { - return err - } - return nil -} - -// Remaining returns if there are no more noders in the tree, if both -// have noders or if one of them doesn't. -func (d *doubleIter) remaining() remaining { - if d.from.current == nil && d.to.current == nil { - return noMoreNoders - } - - if d.from.current == nil && d.to.current != nil { - return onlyToRemains - } - - if d.from.current != nil && d.to.current == nil { - return onlyFromRemains - } - - return bothHaveNodes -} - -// Remaining values tells you whether both trees still have noders, or -// only one of them or none of them. -type remaining int - -const ( - noMoreNoders remaining = iota - onlyToRemains - onlyFromRemains - bothHaveNodes -) - -// Compare returns the comparison between the current elements in the -// merkletries. -func (d *doubleIter) compare() (s comparison, err error) { - s.sameHash = d.hashEqual(d.from.current, d.to.current) - - fromIsDir := d.from.current.IsDir() - toIsDir := d.to.current.IsDir() - - s.bothAreDirs = fromIsDir && toIsDir - s.bothAreFiles = !fromIsDir && !toIsDir - s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles - - fromNumChildren, err := d.from.current.NumChildren() - if err != nil { - return comparison{}, fmt.Errorf("from: %s", err) - } - - toNumChildren, err := d.to.current.NumChildren() - if err != nil { - return comparison{}, fmt.Errorf("to: %s", err) - } - - s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0 - s.toIsEmptyDir = toIsDir && toNumChildren == 0 - - return -} - -// Answers to a lot of questions you can ask about how to noders are -// equal or different. -type comparison struct { - // the following are only valid if both nodes have the same name - // (i.e. 
nameComparison == 0) - - // Do both nodes have the same hash? - sameHash bool - // Are both nodes files? - bothAreFiles bool - - // the following are only valid if any of the noders are dirs, - // this is, if !bothAreFiles - - // Is one a file and the other a dir? - fileAndDir bool - // Are both nodes dirs? - bothAreDirs bool - // Is the from node an empty dir? - fromIsEmptyDir bool - // Is the to Node an empty dir? - toIsEmptyDir bool -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go deleted file mode 100644 index 2fc3d7a638f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ /dev/null @@ -1,195 +0,0 @@ -package filesystem - -import ( - "io" - "os" - "path" - - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" - - "github.com/go-git/go-billy/v5" -) - -var ignore = map[string]bool{ - ".git": true, -} - -// The node represents a file or a directory in a billy.Filesystem. It -// implements the interface noder.Noder of merkletrie package. -// -// This implementation implements a "standard" hash method being able to be -// compared with any other noder.Noder implementation inside of go-git. -type node struct { - fs billy.Filesystem - submodules map[string]plumbing.Hash - - path string - hash []byte - children []noder.Noder - isDir bool -} - -// NewRootNode returns the root node based on a given billy.Filesystem. -// -// In order to provide the submodule hash status, a map[string]plumbing.Hash -// should be provided where the key is the path of the submodule and the commit -// of the submodule HEAD -func NewRootNode( - fs billy.Filesystem, - submodules map[string]plumbing.Hash, -) noder.Noder { - return &node{fs: fs, submodules: submodules, isDir: true} -} - -// Hash the hash of a filesystem is the result of concatenating the computed -// plumbing.Hash of the file as a Blob and its plumbing.FileMode; that way the -// difftree algorithm will detect changes in the contents of files and also in -// their mode. 
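In other words (an editor's sketch of the layout implemented by calculateHash below, where `blobHash` is assumed to be a 20-byte plumbing.Hash and `mode` a filemode.FileMode):

// 20-byte object hash + 4-byte serialized mode = 24-byte node hash,
// so a change to either the contents or the mode is detected.
nodeHash := append(blobHash[:], mode.Bytes()...) // len(nodeHash) == 24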
-// -// The hash of a directory is always a 24-bytes slice of zero values -func (n *node) Hash() []byte { - return n.hash -} - -func (n *node) Name() string { - return path.Base(n.path) -} - -func (n *node) IsDir() bool { - return n.isDir -} - -func (n *node) Children() ([]noder.Noder, error) { - if err := n.calculateChildren(); err != nil { - return nil, err - } - - return n.children, nil -} - -func (n *node) NumChildren() (int, error) { - if err := n.calculateChildren(); err != nil { - return -1, err - } - - return len(n.children), nil -} - -func (n *node) calculateChildren() error { - if !n.IsDir() { - return nil - } - - if len(n.children) != 0 { - return nil - } - - files, err := n.fs.ReadDir(n.path) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - for _, file := range files { - if _, ok := ignore[file.Name()]; ok { - continue - } - - c, err := n.newChildNode(file) - if err != nil { - return err - } - - n.children = append(n.children, c) - } - - return nil -} - -func (n *node) newChildNode(file os.FileInfo) (*node, error) { - path := path.Join(n.path, file.Name()) - - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - - node := &node{ - fs: n.fs, - submodules: n.submodules, - - path: path, - hash: hash, - isDir: file.IsDir(), - } - - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) - node.isDir = false - } - - return node, nil -} - -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil - } - - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) - } else { - hash, err = n.doCalculateHashForRegular(path, file) - } - - if err != nil { - return nil, err - } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err - } - - return append(hash[:], mode.Bytes()...), nil -} - -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) - if err != nil { - return plumbing.ZeroHash, err - } - - defer f.Close() - - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) - if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err - } - - return h.Sum(), nil -} - -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) - if err != nil { - return plumbing.ZeroHash, err - } - - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) - if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err - } - - return h.Sum(), nil -} - -func (n *node) String() string { - return n.path -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go deleted file mode 100644 index d05b0c694de..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go +++ /dev/null @@ -1,90 +0,0 @@ -package index - -import ( - "path" - "strings" - - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// The node represents a index.Entry or a directory inferred from the path -// of all entries. It implements the interface noder.Noder of merkletrie -// package. 
-// -// This implementation implements a "standard" hash method being able to be -// compared with any other noder.Noder implementation inside of go-git -type node struct { - path string - entry *index.Entry - children []noder.Noder - isDir bool -} - -// NewRootNode returns the root node of a computed tree from a index.Index, -func NewRootNode(idx *index.Index) noder.Noder { - const rootNode = "" - - m := map[string]*node{rootNode: {isDir: true}} - - for _, e := range idx.Entries { - parts := strings.Split(e.Name, string("/")) - - var fullpath string - for _, part := range parts { - parent := fullpath - fullpath = path.Join(fullpath, part) - - if _, ok := m[fullpath]; ok { - continue - } - - n := &node{path: fullpath} - if fullpath == e.Name { - n.entry = e - } else { - n.isDir = true - } - - m[n.path] = n - m[parent].children = append(m[parent].children, n) - } - } - - return m[rootNode] -} - -func (n *node) String() string { - return n.path -} - -// Hash the hash of a filesystem is a 24-byte slice, is the result of -// concatenating the computed plumbing.Hash of the file as a Blob and its -// plumbing.FileMode; that way the difftree algorithm will detect changes in the -// contents of files and also in their mode. -// -// If the node is computed and not based on a index.Entry the hash is equals -// to a 24-bytes slices of zero values. -func (n *node) Hash() []byte { - if n.entry == nil { - return make([]byte, 24) - } - - return append(n.entry.Hash[:], n.entry.Mode.Bytes()...) -} - -func (n *node) Name() string { - return path.Base(n.path) -} - -func (n *node) IsDir() bool { - return n.isDir -} - -func (n *node) Children() ([]noder.Noder, error) { - return n.children, nil -} - -func (n *node) NumChildren() (int, error) { - return len(n.children), nil -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go deleted file mode 100644 index 131878a1c7a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go +++ /dev/null @@ -1,91 +0,0 @@ -package frame - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// A Frame is a collection of siblings in a trie, sorted alphabetically -// by name. -type Frame struct { - // siblings, sorted in reverse alphabetical order by name - stack []noder.Noder -} - -type byName []noder.Noder - -func (a byName) Len() int { return len(a) } -func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byName) Less(i, j int) bool { - return strings.Compare(a[i].Name(), a[j].Name()) < 0 -} - -// New returns a frame with the children of the provided node. -func New(n noder.Noder) (*Frame, error) { - children, err := n.Children() - if err != nil { - return nil, err - } - - sort.Sort(sort.Reverse(byName(children))) - return &Frame{ - stack: children, - }, nil -} - -// String returns the quoted names of the noders in the frame sorted in -// alphabetical order by name, surrounded by square brackets and -// separated by comas. 
-// -// Examples: -// [] -// ["a", "b"] -func (f *Frame) String() string { - var buf bytes.Buffer - _ = buf.WriteByte('[') - - sep := "" - for i := f.Len() - 1; i >= 0; i-- { - _, _ = buf.WriteString(sep) - sep = ", " - _, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name())) - } - - _ = buf.WriteByte(']') - - return buf.String() -} - -// First returns, but dont extract, the noder with the alphabetically -// smaller name in the frame and true if the frame was not empty. -// Otherwise it returns nil and false. -func (f *Frame) First() (noder.Noder, bool) { - if f.Len() == 0 { - return nil, false - } - - top := f.Len() - 1 - - return f.stack[top], true -} - -// Drop extracts the noder with the alphabetically smaller name in the -// frame or does nothing if the frame was empty. -func (f *Frame) Drop() { - if f.Len() == 0 { - return - } - - top := f.Len() - 1 - f.stack[top] = nil - f.stack = f.stack[:top] -} - -// Len returns the number of noders in the frame. -func (f *Frame) Len() int { - return len(f.stack) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go deleted file mode 100644 index d75afec4643..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go +++ /dev/null @@ -1,216 +0,0 @@ -package merkletrie - -import ( - "fmt" - "io" - - "github.com/go-git/go-git/v5/utils/merkletrie/internal/frame" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -// Iter is an iterator for merkletries (only the trie part of the -// merkletrie is relevant here, it does not use the Hasher interface). -// -// The iteration is performed in depth-first pre-order. Entries at each -// depth are traversed in (case-sensitive) alphabetical order. -// -// This is the kind of traversal you will expect when listing ordinary -// files and directories recursively, for example: -// -// Trie Traversal order -// ---- --------------- -// . -// / | \ c -// / | \ d/ -// d c z ===> d/a -// / \ d/b -// b a z -// -// -// This iterator is somewhat especial as you can chose to skip whole -// "directories" when iterating: -// -// - The Step method will iterate normally. -// -// - the Next method will not descend deeper into the tree. -// -// For example, if the iterator is at `d/`, the Step method will return -// `d/a` while the Next would have returned `z` instead (skipping `d/` -// and its descendants). The name of the these two methods are based on -// the well known "next" and "step" operations, quite common in -// debuggers, like gdb. -// -// The paths returned by the iterator will be relative, if the iterator -// was created from a single node, or absolute, if the iterator was -// created from the path to the node (the path will be prefixed to all -// returned paths). -type Iter struct { - // Tells if the iteration has started. - hasStarted bool - // The top of this stack has the current node and its siblings. The - // rest of the stack keeps the ancestors of the current node and - // their corresponding siblings. The current element is always the - // top element of the top frame. - // - // When "step"ping into a node, its children are pushed as a new - // frame. - // - // When "next"ing pass a node, the current element is dropped by - // popping the top frame. - frameStack []*frame.Frame - // The base path used to turn the relative paths used internally by - // the iterator into absolute paths used by external applications. - // For relative iterator this will be nil. 
- base noder.Path -} - -// NewIter returns a new relative iterator using the provider noder as -// its unnamed root. When iterating, all returned paths will be -// relative to node. -func NewIter(n noder.Noder) (*Iter, error) { - return newIter(n, nil) -} - -// NewIterFromPath returns a new absolute iterator from the noder at the -// end of the path p. When iterating, all returned paths will be -// absolute, using the root of the path p as their root. -func NewIterFromPath(p noder.Path) (*Iter, error) { - return newIter(p, p) // Path implements Noder -} - -func newIter(root noder.Noder, base noder.Path) (*Iter, error) { - ret := &Iter{ - base: base, - } - - if root == nil { - return ret, nil - } - - frame, err := frame.New(root) - if err != nil { - return nil, err - } - ret.push(frame) - - return ret, nil -} - -func (iter *Iter) top() (*frame.Frame, bool) { - if len(iter.frameStack) == 0 { - return nil, false - } - top := len(iter.frameStack) - 1 - - return iter.frameStack[top], true -} - -func (iter *Iter) push(f *frame.Frame) { - iter.frameStack = append(iter.frameStack, f) -} - -const ( - doDescend = true - dontDescend = false -) - -// Next returns the path of the next node without descending deeper into -// the trie and nil. If there are no more entries in the trie it -// returns nil and io.EOF. In case of error, it will return nil and the -// error. -func (iter *Iter) Next() (noder.Path, error) { - return iter.advance(dontDescend) -} - -// Step returns the path to the next node in the trie, descending deeper -// into it if needed, and nil. If there are no more nodes in the trie, -// it returns nil and io.EOF. In case of error, it will return nil and -// the error. -func (iter *Iter) Step() (noder.Path, error) { - return iter.advance(doDescend) -} - -// Advances the iterator in the desired direction: descend or -// dontDescend. -// -// Returns the new current element and a nil error on success. If there -// are no more elements in the trie below the base, it returns nil, and -// io.EOF. Returns nil and an error in case of errors. -func (iter *Iter) advance(wantDescend bool) (noder.Path, error) { - current, err := iter.current() - if err != nil { - return nil, err - } - - // The first time we just return the current node. - if !iter.hasStarted { - iter.hasStarted = true - return current, nil - } - - // Advances means getting a next current node, either its first child or - // its next sibling, depending if we must descend or not. - numChildren, err := current.NumChildren() - if err != nil { - return nil, err - } - - mustDescend := numChildren != 0 && wantDescend - if mustDescend { - // descend: add a new frame with the current's children. - frame, err := frame.New(current) - if err != nil { - return nil, err - } - iter.push(frame) - } else { - // don't descend: just drop the current node - iter.drop() - } - - return iter.current() -} - -// Returns the path to the current node, adding the base if there was -// one, and a nil error. If there were no noders left, it returns nil -// and io.EOF. If an error occurred, it returns nil and the error. -func (iter *Iter) current() (noder.Path, error) { - if topFrame, ok := iter.top(); !ok { - return nil, io.EOF - } else if _, ok := topFrame.First(); !ok { - return nil, io.EOF - } - - ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack)) - - // concat the base... - ret = append(ret, iter.base...) - // ... 
and the current node and all its ancestors - for i, f := range iter.frameStack { - t, ok := f.First() - if !ok { - panic(fmt.Sprintf("frame %d is empty", i)) - } - ret = append(ret, t) - } - - return ret, nil -} - -// removes the current node if any, and all the frames that become empty as a -// consequence of this action. -func (iter *Iter) drop() { - frame, ok := iter.top() - if !ok { - return - } - - frame.Drop() - // if the frame is empty, remove it and its parent, recursively - if frame.Len() == 0 { - top := len(iter.frameStack) - 1 - iter.frameStack[top] = nil - iter.frameStack = iter.frameStack[:top] - iter.drop() - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go deleted file mode 100644 index d6b3de4adaf..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package noder provide an interface for defining nodes in a -// merkletrie, their hashes and their paths (a noders and its -// ancestors). -// -// The hasher interface is easy to implement naively by elements that -// already have a hash, like git blobs and trees. More sophisticated -// implementations can implement the Equal function in exotic ways -// though: for instance, comparing the modification time of directories -// in a filesystem. -package noder - -import "fmt" - -// Hasher interface is implemented by types that can tell you -// their hash. -type Hasher interface { - Hash() []byte -} - -// Equal functions take two hashers and return if they are equal. -// -// These functions are expected to be faster than reflect.Equal or -// reflect.DeepEqual because they can compare just the hash of the -// objects, instead of their contents, so they are expected to be O(1). -type Equal func(a, b Hasher) bool - -// The Noder interface is implemented by the elements of a Merkle Trie. -// -// There are two types of elements in a Merkle Trie: -// -// - file-like nodes: they cannot have children. -// -// - directory-like nodes: they can have 0 or more children and their -// hash is calculated by combining their children hashes. -type Noder interface { - Hasher - fmt.Stringer // for testing purposes - // Name returns the name of an element (relative, not its full - // path). - Name() string - // IsDir returns true if the element is a directory-like node or - // false if it is a file-like node. - IsDir() bool - // Children returns the children of the element. Note that empty - // directory-like noders and file-like noders will both return - // NoChildren. - Children() ([]Noder, error) - // NumChildren returns the number of children this element has. - // - // This method is an optimization: the number of children is easily - // calculated as the length of the value returned by the Children - // method (above); yet, some implementations will be able to - // implement NumChildren in O(1) while Children is usually more - // complex. - NumChildren() (int, error) -} - -// NoChildren represents the children of a noder without children. 
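To illustrate the contract, a hypothetical in-memory noder (editor's sketch, not part of go-git; Hash may be nil here because plain iteration never compares hashes) that can be walked with the Iter deleted earlier in this diff:

type memNoder struct {
	name     string
	dir      bool
	children []noder.Noder
}

func (m *memNoder) Hash() []byte                     { return nil }
func (m *memNoder) String() string                   { return m.name }
func (m *memNoder) Name() string                     { return m.name }
func (m *memNoder) IsDir() bool                      { return m.dir }
func (m *memNoder) Children() ([]noder.Noder, error) { return m.children, nil }
func (m *memNoder) NumChildren() (int, error)        { return len(m.children), nil }

// Walking the example trie from the Iter documentation: Step descends
// into directories, while Next would skip over their contents.
root := &memNoder{dir: true, children: []noder.Noder{
	&memNoder{name: "c"},
	&memNoder{name: "d", dir: true, children: []noder.Noder{
		&memNoder{name: "a"}, &memNoder{name: "b"},
	}},
	&memNoder{name: "z"},
}}
it, _ := merkletrie.NewIter(root)
for {
	p, err := it.Step()
	if err != nil {
		break // io.EOF ends the walk; a real caller would check other errors
	}
	fmt.Println(p.String()) // c, d, d/a, d/b, z
}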
-var NoChildren = []Noder{} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go deleted file mode 100644 index 1c7ef54eebc..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go +++ /dev/null @@ -1,90 +0,0 @@ -package noder - -import ( - "bytes" - "strings" -) - -// Path values represent a noder and its ancestors. The root goes first -// and the actual final noder the path is referring to will be the last. -// -// A path implements the Noder interface, redirecting all the interface -// calls to its final noder. -// -// Paths build from an empty Noder slice are not valid paths and should -// not be used. -type Path []Noder - -// String returns the full path of the final noder as a string, using -// "/" as the separator. -func (p Path) String() string { - var buf bytes.Buffer - sep := "" - for _, e := range p { - _, _ = buf.WriteString(sep) - sep = "/" - _, _ = buf.WriteString(e.Name()) - } - - return buf.String() -} - -// Last returns the final noder in the path. -func (p Path) Last() Noder { - return p[len(p)-1] -} - -// Hash returns the hash of the final noder of the path. -func (p Path) Hash() []byte { - return p.Last().Hash() -} - -// Name returns the name of the final noder of the path. -func (p Path) Name() string { - return p.Last().Name() -} - -// IsDir returns if the final noder of the path is a directory-like -// noder. -func (p Path) IsDir() bool { - return p.Last().IsDir() -} - -// Children returns the children of the final noder in the path. -func (p Path) Children() ([]Noder, error) { - return p.Last().Children() -} - -// NumChildren returns the number of children the final noder of the -// path has. -func (p Path) NumChildren() (int, error) { - return p.Last().NumChildren() -} - -// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger -// than other, in "directory order"; for example: -// -// "a" < "b" -// "a/b/c/d/z" < "b" -// "a/b/a" > "a/b" -func (p Path) Compare(other Path) int { - i := 0 - for { - switch { - case len(other) == len(p) && i == len(p): - return 0 - case i == len(other): - return 1 - case i == len(p): - return -1 - default: - // We do *not* normalize Unicode here. CGit doesn't. 
- // https://github.com/src-d/go-git/issues/1057 - cmp := strings.Compare(p[i].Name(), other[i].Name()) - if cmp != 0 { - return cmp - } - } - i++ - } -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree.go deleted file mode 100644 index f23d9f1703c..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree.go +++ /dev/null @@ -1,965 +0,0 @@ -package git - -import ( - "context" - "errors" - "fmt" - "io" - stdioutil "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/plumbing/storer" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/go-git/go-git/v5/utils/merkletrie" - - "github.com/go-git/go-billy/v5" - "github.com/go-git/go-billy/v5/util" -) - -var ( - ErrWorktreeNotClean = errors.New("worktree is not clean") - ErrSubmoduleNotFound = errors.New("submodule not found") - ErrUnstagedChanges = errors.New("worktree contains unstaged changes") - ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink") - ErrNonFastForwardUpdate = errors.New("non-fast-forward update") -) - -// Worktree represents a git worktree. -type Worktree struct { - // Filesystem underlying filesystem. - Filesystem billy.Filesystem - // External excludes not found in the repository .gitignore - Excludes []gitignore.Pattern - - r *Repository -} - -// Pull incorporates changes from a remote repository into the current branch. -// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are -// no changes to be fetched, or an error. -// -// Pull only supports merges where the can be resolved as a fast-forward. -func (w *Worktree) Pull(o *PullOptions) error { - return w.PullContext(context.Background(), o) -} - -// PullContext incorporates changes from a remote repository into the current -// branch. Returns nil if the operation is successful, NoErrAlreadyUpToDate if -// there are no changes to be fetched, or an error. -// -// Pull only supports merges where the can be resolved as a fast-forward. -// -// The provided Context must be non-nil. If the context expires before the -// operation is complete, an error is returned. The context only affects the -// transport operations. 
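Before the implementation, a sketched call site (editor's illustration, assuming the standard go-git import path and a repository in the working directory; only RemoteName is set, the other PullOptions keep their zero values):

// Fast-forward pull of the current branch from "origin".
repo, err := git.PlainOpen(".")
if err != nil {
	return err
}
w, err := repo.Worktree()
if err != nil {
	return err
}
err = w.PullContext(context.Background(), &git.PullOptions{RemoteName: "origin"})
if err == git.NoErrAlreadyUpToDate {
	err = nil // already current; not a failure
}
return err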
-func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error { - if err := o.Validate(); err != nil { - return err - } - - remote, err := w.r.Remote(o.RemoteName) - if err != nil { - return err - } - - fetchHead, err := remote.fetch(ctx, &FetchOptions{ - RemoteName: o.RemoteName, - Depth: o.Depth, - Auth: o.Auth, - Progress: o.Progress, - Force: o.Force, - InsecureSkipTLS: o.InsecureSkipTLS, - CABundle: o.CABundle, - }) - - updated := true - if err == NoErrAlreadyUpToDate { - updated = false - } else if err != nil { - return err - } - - ref, err := storer.ResolveReference(fetchHead, o.ReferenceName) - if err != nil { - return err - } - - head, err := w.r.Head() - if err == nil { - headAheadOfRef, err := isFastForward(w.r.Storer, ref.Hash(), head.Hash()) - if err != nil { - return err - } - - if !updated && headAheadOfRef { - return NoErrAlreadyUpToDate - } - - ff, err := isFastForward(w.r.Storer, head.Hash(), ref.Hash()) - if err != nil { - return err - } - - if !ff { - return ErrNonFastForwardUpdate - } - } - - if err != nil && err != plumbing.ErrReferenceNotFound { - return err - } - - if err := w.updateHEAD(ref.Hash()); err != nil { - return err - } - - if err := w.Reset(&ResetOptions{ - Mode: MergeReset, - Commit: ref.Hash(), - }); err != nil { - return err - } - - if o.RecurseSubmodules != NoRecurseSubmodules { - return w.updateSubmodules(&SubmoduleUpdateOptions{ - RecurseSubmodules: o.RecurseSubmodules, - Auth: o.Auth, - }) - } - - return nil -} - -func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error { - s, err := w.Submodules() - if err != nil { - return err - } - o.Init = true - return s.Update(o) -} - -// Checkout switch branches or restore working tree files. -func (w *Worktree) Checkout(opts *CheckoutOptions) error { - if err := opts.Validate(); err != nil { - return err - } - - if opts.Create { - if err := w.createBranch(opts); err != nil { - return err - } - } - - c, err := w.getCommitFromCheckoutOptions(opts) - if err != nil { - return err - } - - ro := &ResetOptions{Commit: c, Mode: MergeReset} - if opts.Force { - ro.Mode = HardReset - } else if opts.Keep { - ro.Mode = SoftReset - } - - if !opts.Hash.IsZero() && !opts.Create { - err = w.setHEADToCommit(opts.Hash) - } else { - err = w.setHEADToBranch(opts.Branch, c) - } - - if err != nil { - return err - } - - return w.Reset(ro) -} -func (w *Worktree) createBranch(opts *CheckoutOptions) error { - _, err := w.r.Storer.Reference(opts.Branch) - if err == nil { - return fmt.Errorf("a branch named %q already exists", opts.Branch) - } - - if err != plumbing.ErrReferenceNotFound { - return err - } - - if opts.Hash.IsZero() { - ref, err := w.r.Head() - if err != nil { - return err - } - - opts.Hash = ref.Hash() - } - - return w.r.Storer.SetReference( - plumbing.NewHashReference(opts.Branch, opts.Hash), - ) -} - -func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil - } - - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } - - if !b.Name().IsTag() { - return b.Hash(), nil - } - - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) - if err != nil { - return plumbing.ZeroHash, err - } - - switch o := o.(type) { - case *object.Tag: - if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) - } - - return o.Target, nil - case *object.Commit: - return o.Hash, nil - } - - return plumbing.ZeroHash, 
fmt.Errorf("unsupported tag target %q", o.Type()) -} - -func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { - head := plumbing.NewHashReference(plumbing.HEAD, commit) - return w.r.Storer.SetReference(head) -} - -func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbing.Hash) error { - target, err := w.r.Storer.Reference(branch) - if err != nil { - return err - } - - var head *plumbing.Reference - if target.Name().IsBranch() { - head = plumbing.NewSymbolicReference(plumbing.HEAD, target.Name()) - } else { - head = plumbing.NewHashReference(plumbing.HEAD, commit) - } - - return w.r.Storer.SetReference(head) -} - -// Reset the worktree to a specified state. -func (w *Worktree) Reset(opts *ResetOptions) error { - if err := opts.Validate(w.r); err != nil { - return err - } - - if opts.Mode == MergeReset { - unstaged, err := w.containsUnstagedChanges() - if err != nil { - return err - } - - if unstaged { - return ErrUnstagedChanges - } - } - - if err := w.setHEADCommit(opts.Commit); err != nil { - return err - } - - if opts.Mode == SoftReset { - return nil - } - - t, err := w.getTreeFromCommitHash(opts.Commit) - if err != nil { - return err - } - - if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetIndex(t); err != nil { - return err - } - } - - if opts.Mode == MergeReset || opts.Mode == HardReset { - if err := w.resetWorktree(t); err != nil { - return err - } - } - - return nil -} - -func (w *Worktree) resetIndex(t *object.Tree) error { - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - b := newIndexBuilder(idx) - - changes, err := w.diffTreeWithStaging(t, true) - if err != nil { - return err - } - - for _, ch := range changes { - a, err := ch.Action() - if err != nil { - return err - } - - var name string - var e *object.TreeEntry - - switch a { - case merkletrie.Modify, merkletrie.Insert: - name = ch.To.String() - e, err = t.FindEntry(name) - if err != nil { - return err - } - case merkletrie.Delete: - name = ch.From.String() - } - - b.Remove(name) - if e == nil { - continue - } - - b.Add(&index.Entry{ - Name: name, - Hash: e.Hash, - Mode: e.Mode, - }) - - } - - b.Write(idx) - return w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) resetWorktree(t *object.Tree) error { - changes, err := w.diffStagingWithWorktree(true) - if err != nil { - return err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - b := newIndexBuilder(idx) - - for _, ch := range changes { - if err := w.checkoutChange(ch, t, b); err != nil { - return err - } - } - - b.Write(idx) - return w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *indexBuilder) error { - a, err := ch.Action() - if err != nil { - return err - } - - var e *object.TreeEntry - var name string - var isSubmodule bool - - switch a { - case merkletrie.Modify, merkletrie.Insert: - name = ch.To.String() - e, err = t.FindEntry(name) - if err != nil { - return err - } - - isSubmodule = e.Mode == filemode.Submodule - case merkletrie.Delete: - return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String()) - } - - if isSubmodule { - return w.checkoutChangeSubmodule(name, a, e, idx) - } - - return w.checkoutChangeRegularFile(name, a, t, e, idx) -} - -func (w *Worktree) containsUnstagedChanges() (bool, error) { - ch, err := w.diffStagingWithWorktree(false) - if err != nil { - return false, err - } - - for _, c := range ch { - a, err := c.Action() - if err != nil { - return false, err - } 
- - if a == merkletrie.Insert { - continue - } - - return true, nil - } - - return false, nil -} - -func (w *Worktree) setHEADCommit(commit plumbing.Hash) error { - head, err := w.r.Reference(plumbing.HEAD, false) - if err != nil { - return err - } - - if head.Type() == plumbing.HashReference { - head = plumbing.NewHashReference(plumbing.HEAD, commit) - return w.r.Storer.SetReference(head) - } - - branch, err := w.r.Reference(head.Target(), false) - if err != nil { - return err - } - - if !branch.Name().IsBranch() { - return fmt.Errorf("invalid HEAD target should be a branch, found %s", branch.Type()) - } - - branch = plumbing.NewHashReference(branch.Name(), commit) - return w.r.Storer.SetReference(branch) -} - -func (w *Worktree) checkoutChangeSubmodule(name string, - a merkletrie.Action, - e *object.TreeEntry, - idx *indexBuilder, -) error { - switch a { - case merkletrie.Modify: - sub, err := w.Submodule(name) - if err != nil { - return err - } - - if !sub.initialized { - return nil - } - - return w.addIndexFromTreeEntry(name, e, idx) - case merkletrie.Insert: - mode, err := e.Mode.ToOSFileMode() - if err != nil { - return err - } - - if err := w.Filesystem.MkdirAll(name, mode); err != nil { - return err - } - - return w.addIndexFromTreeEntry(name, e, idx) - } - - return nil -} - -func (w *Worktree) checkoutChangeRegularFile(name string, - a merkletrie.Action, - t *object.Tree, - e *object.TreeEntry, - idx *indexBuilder, -) error { - switch a { - case merkletrie.Modify: - idx.Remove(name) - - // to apply perm changes the file is deleted, billy doesn't implement - // chmod - if err := w.Filesystem.Remove(name); err != nil { - return err - } - - fallthrough - case merkletrie.Insert: - f, err := t.File(name) - if err != nil { - return err - } - - if err := w.checkoutFile(f); err != nil { - return err - } - - return w.addIndexFromFile(name, e.Hash, idx) - } - - return nil -} - -var copyBufferPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 32*1024) - }, -} - -func (w *Worktree) checkoutFile(f *object.File) (err error) { - mode, err := f.Mode.ToOSFileMode() - if err != nil { - return - } - - if mode&os.ModeSymlink != 0 { - return w.checkoutFileSymlink(f) - } - - from, err := f.Reader() - if err != nil { - return - } - - defer ioutil.CheckClose(from, &err) - - to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return - } - - defer ioutil.CheckClose(to, &err) - buf := copyBufferPool.Get().([]byte) - _, err = io.CopyBuffer(to, from, buf) - copyBufferPool.Put(buf) - return -} - -func (w *Worktree) checkoutFileSymlink(f *object.File) (err error) { - from, err := f.Reader() - if err != nil { - return - } - - defer ioutil.CheckClose(from, &err) - - bytes, err := stdioutil.ReadAll(from) - if err != nil { - return - } - - err = w.Filesystem.Symlink(string(bytes), f.Name) - - // On windows, this might fail. - // Follow Git on Windows behavior by writing the link as it is. 
- if err != nil && isSymlinkWindowsNonAdmin(err) { - mode, _ := f.Mode.ToOSFileMode() - - to, err := w.Filesystem.OpenFile(f.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return err - } - - defer ioutil.CheckClose(to, &err) - - _, err = to.Write(bytes) - return err - } - return -} - -func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *indexBuilder) error { - idx.Remove(name) - idx.Add(&index.Entry{ - Hash: f.Hash, - Name: name, - Mode: filemode.Submodule, - }) - return nil -} - -func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error { - idx.Remove(name) - fi, err := w.Filesystem.Lstat(name) - if err != nil { - return err - } - - mode, err := filemode.NewFromOSFileMode(fi.Mode()) - if err != nil { - return err - } - - e := &index.Entry{ - Hash: h, - Name: name, - Mode: mode, - ModifiedAt: fi.ModTime(), - Size: uint32(fi.Size()), - } - - // if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid - // can be retrieved, otherwise this doesn't apply - if fillSystemInfo != nil { - fillSystemInfo(e, fi.Sys()) - } - idx.Add(e) - return nil -} - -func (w *Worktree) getTreeFromCommitHash(commit plumbing.Hash) (*object.Tree, error) { - c, err := w.r.CommitObject(commit) - if err != nil { - return nil, err - } - - return c.Tree() -} - -var fillSystemInfo func(e *index.Entry, sys interface{}) - -const gitmodulesFile = ".gitmodules" - -// Submodule returns the submodule with the given name -func (w *Worktree) Submodule(name string) (*Submodule, error) { - l, err := w.Submodules() - if err != nil { - return nil, err - } - - for _, m := range l { - if m.Config().Name == name { - return m, nil - } - } - - return nil, ErrSubmoduleNotFound -} - -// Submodules returns all the available submodules -func (w *Worktree) Submodules() (Submodules, error) { - l := make(Submodules, 0) - m, err := w.readGitmodulesFile() - if err != nil || m == nil { - return l, err - } - - c, err := w.r.Config() - if err != nil { - return nil, err - } - - for _, s := range m.Submodules { - l = append(l, w.newSubmodule(s, c.Submodules[s.Name])) - } - - return l, nil -} - -func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { - m := &Submodule{w: w} - m.initialized = fromConfig != nil - - if !m.initialized { - m.c = fromModules - return m - } - - m.c = fromConfig - m.c.Path = fromModules.Path - return m -} - -func (w *Worktree) isSymlink(path string) bool { - if s, err := w.Filesystem.Lstat(path); err == nil { - return s.Mode()&os.ModeSymlink != 0 - } - return false -} - -func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { - if w.isSymlink(gitmodulesFile) { - return nil, ErrGitModulesSymlink - } - - f, err := w.Filesystem.Open(gitmodulesFile) - if err != nil { - if os.IsNotExist(err) { - return nil, nil - } - - return nil, err - } - - defer f.Close() - input, err := stdioutil.ReadAll(f) - if err != nil { - return nil, err - } - - m := config.NewModules() - if err := m.Unmarshal(input); err != nil { - return m, err - } - - return m, nil -} - -// Clean the worktree by removing untracked files. -// An empty dir could be removed - this is what `git clean -f -d .` does. 
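A sketched call site (editor's illustration; CleanOptions usage is inferred from the code below, where only the Dir field is consulted):

// Remove untracked files and, with Dir set, empty untracked
// directories: the equivalent of `git clean -f -d .`.
if err := w.Clean(&git.CleanOptions{Dir: true}); err != nil {
	return err
}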
-func (w *Worktree) Clean(opts *CleanOptions) error { - s, err := w.Status() - if err != nil { - return err - } - - root := "" - files, err := w.Filesystem.ReadDir(root) - if err != nil { - return err - } - return w.doClean(s, opts, root, files) -} - -func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files []os.FileInfo) error { - for _, fi := range files { - if fi.Name() == GitDirName { - continue - } - - // relative path under the root - path := filepath.Join(dir, fi.Name()) - if fi.IsDir() { - if !opts.Dir { - continue - } - - subfiles, err := w.Filesystem.ReadDir(path) - if err != nil { - return err - } - err = w.doClean(status, opts, path, subfiles) - if err != nil { - return err - } - } else { - if status.IsUntracked(path) { - if err := w.Filesystem.Remove(path); err != nil { - return err - } - } - } - } - - if opts.Dir && dir != "" { - return doCleanDirectories(w.Filesystem, dir) - } - return nil -} - -// GrepResult is structure of a grep result. -type GrepResult struct { - // FileName is the name of file which contains match. - FileName string - // LineNumber is the line number of a file at which a match was found. - LineNumber int - // Content is the content of the file at the matching line. - Content string - // TreeName is the name of the tree (reference name/commit hash) at - // which the match was performed. - TreeName string -} - -func (gr GrepResult) String() string { - return fmt.Sprintf("%s:%s:%d:%s", gr.TreeName, gr.FileName, gr.LineNumber, gr.Content) -} - -// Grep performs grep on a worktree. -func (w *Worktree) Grep(opts *GrepOptions) ([]GrepResult, error) { - if err := opts.Validate(w); err != nil { - return nil, err - } - - // Obtain commit hash from options (CommitHash or ReferenceName). - var commitHash plumbing.Hash - // treeName contains the value of TreeName in GrepResult. - var treeName string - - if opts.ReferenceName != "" { - ref, err := w.r.Reference(opts.ReferenceName, true) - if err != nil { - return nil, err - } - commitHash = ref.Hash() - treeName = opts.ReferenceName.String() - } else if !opts.CommitHash.IsZero() { - commitHash = opts.CommitHash - treeName = opts.CommitHash.String() - } - - // Obtain a tree from the commit hash and get a tracked files iterator from - // the tree. - tree, err := w.getTreeFromCommitHash(commitHash) - if err != nil { - return nil, err - } - fileiter := tree.Files() - - return findMatchInFiles(fileiter, treeName, opts) -} - -// findMatchInFiles takes a FileIter, worktree name and GrepOptions, and -// returns a slice of GrepResult containing the result of regex pattern matching -// in content of all the files. -func findMatchInFiles(fileiter *object.FileIter, treeName string, opts *GrepOptions) ([]GrepResult, error) { - var results []GrepResult - - err := fileiter.ForEach(func(file *object.File) error { - var fileInPathSpec bool - - // When no pathspecs are provided, search all the files. - if len(opts.PathSpecs) == 0 { - fileInPathSpec = true - } - - // Check if the file name matches with the pathspec. Break out of the - // loop once a match is found. - for _, pathSpec := range opts.PathSpecs { - if pathSpec != nil && pathSpec.MatchString(file.Name) { - fileInPathSpec = true - break - } - } - - // If the file does not match with any of the pathspec, skip it. - if !fileInPathSpec { - return nil - } - - grepResults, err := findMatchInFile(file, treeName, opts) - if err != nil { - return err - } - results = append(results, grepResults...) 
- - return nil - }) - - return results, err -} - -// findMatchInFile takes a single File, worktree name and GrepOptions, -// and returns a slice of GrepResult containing the result of regex pattern -// matching in the given file. -func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]GrepResult, error) { - var grepResults []GrepResult - - content, err := file.Contents() - if err != nil { - return grepResults, err - } - - // Split the file content and parse line-by-line. - contentByLine := strings.Split(content, "\n") - for lineNum, cnt := range contentByLine { - addToResult := false - - // Match the patterns and content. Break out of the loop once a - // match is found. - for _, pattern := range opts.Patterns { - if pattern != nil && pattern.MatchString(cnt) { - // Add to result only if invert match is not enabled. - if !opts.InvertMatch { - addToResult = true - break - } - } else if opts.InvertMatch { - // If matching fails, and invert match is enabled, add to - // results. - addToResult = true - break - } - } - - if addToResult { - grepResults = append(grepResults, GrepResult{ - FileName: file.Name, - LineNumber: lineNum + 1, - Content: cnt, - TreeName: treeName, - }) - } - } - - return grepResults, nil -} - -func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error { - if err := util.RemoveAll(fs, name); err != nil { - return err - } - - dir := filepath.Dir(name) - return doCleanDirectories(fs, dir) -} - -// doCleanDirectories removes empty subdirs (without files) -func doCleanDirectories(fs billy.Filesystem, dir string) error { - files, err := fs.ReadDir(dir) - if err != nil { - return err - } - if len(files) == 0 { - return fs.Remove(dir) - } - return nil -} - -type indexBuilder struct { - entries map[string]*index.Entry -} - -func newIndexBuilder(idx *index.Index) *indexBuilder { - entries := make(map[string]*index.Entry, len(idx.Entries)) - for _, e := range idx.Entries { - entries[e.Name] = e - } - return &indexBuilder{ - entries: entries, - } -} - -func (b *indexBuilder) Write(idx *index.Index) { - idx.Entries = idx.Entries[:0] - for _, e := range b.entries { - idx.Entries = append(idx.Entries, e) - } -} - -func (b *indexBuilder) Add(e *index.Entry) { - b.entries[e.Name] = e -} - -func (b *indexBuilder) Remove(name string) { - delete(b.entries, filepath.ToSlash(name)) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_bsd.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_bsd.go deleted file mode 100644 index d4ea327588f..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_bsd.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build darwin freebsd netbsd - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_commit.go deleted file mode 100644 index dc7956909ee..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ /dev/null @@ -1,238 +0,0 @@ -package git - -import ( - "bytes" - "path" - "sort" - "strings" - - "github.com/go-git/go-git/v5/plumbing" - 
"github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/storage" - - "github.com/ProtonMail/go-crypto/openpgp" - "github.com/go-git/go-billy/v5" -) - -// Commit stores the current contents of the index in a new commit along with -// a log message from the user describing the changes. -func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) { - if err := opts.Validate(w.r); err != nil { - return plumbing.ZeroHash, err - } - - if opts.All { - if err := w.autoAddModifiedAndDeleted(); err != nil { - return plumbing.ZeroHash, err - } - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - h := &buildTreeHelper{ - fs: w.Filesystem, - s: w.r.Storer, - } - - tree, err := h.BuildTree(idx) - if err != nil { - return plumbing.ZeroHash, err - } - - commit, err := w.buildCommitObject(msg, opts, tree) - if err != nil { - return plumbing.ZeroHash, err - } - - return commit, w.updateHEAD(commit) -} - -func (w *Worktree) autoAddModifiedAndDeleted() error { - s, err := w.Status() - if err != nil { - return err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - - for path, fs := range s { - if fs.Worktree != Modified && fs.Worktree != Deleted { - continue - } - - if _, _, err := w.doAddFile(idx, s, path, nil); err != nil { - return err - } - - } - - return w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) updateHEAD(commit plumbing.Hash) error { - head, err := w.r.Storer.Reference(plumbing.HEAD) - if err != nil { - return err - } - - name := plumbing.HEAD - if head.Type() != plumbing.HashReference { - name = head.Target() - } - - ref := plumbing.NewHashReference(name, commit) - return w.r.Storer.SetReference(ref) -} - -func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) { - commit := &object.Commit{ - Author: *opts.Author, - Committer: *opts.Committer, - Message: msg, - TreeHash: tree, - ParentHashes: opts.Parents, - } - - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) - if err != nil { - return plumbing.ZeroHash, err - } - commit.PGPSignature = sig - } - - obj := w.r.Storer.NewEncodedObject() - if err := commit.Encode(obj); err != nil { - return plumbing.ZeroHash, err - } - return w.r.Storer.SetEncodedObject(obj) -} - -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err - } - var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err - } - return b.String(), nil -} - -// buildTreeHelper converts a given index.Index file into multiple git objects -// reading the blobs from the given filesystem and creating the trees from the -// index structure. The created objects are pushed to a given Storer. -type buildTreeHelper struct { - fs billy.Filesystem - s storage.Storer - - trees map[string]*object.Tree - entries map[string]*object.TreeEntry -} - -// BuildTree builds the tree objects and push its to the storer, the hash -// of the root tree is returned. 
-func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) { - const rootNode = "" - h.trees = map[string]*object.Tree{rootNode: {}} - h.entries = map[string]*object.TreeEntry{} - - for _, e := range idx.Entries { - if err := h.commitIndexEntry(e); err != nil { - return plumbing.ZeroHash, err - } - } - - return h.copyTreeToStorageRecursive(rootNode, h.trees[rootNode]) -} - -func (h *buildTreeHelper) commitIndexEntry(e *index.Entry) error { - parts := strings.Split(e.Name, "/") - - var fullpath string - for _, part := range parts { - parent := fullpath - fullpath = path.Join(fullpath, part) - - h.doBuildTree(e, parent, fullpath) - } - - return nil -} - -func (h *buildTreeHelper) doBuildTree(e *index.Entry, parent, fullpath string) { - if _, ok := h.trees[fullpath]; ok { - return - } - - if _, ok := h.entries[fullpath]; ok { - return - } - - te := object.TreeEntry{Name: path.Base(fullpath)} - - if fullpath == e.Name { - te.Mode = e.Mode - te.Hash = e.Hash - } else { - te.Mode = filemode.Dir - h.trees[fullpath] = &object.Tree{} - } - - h.trees[parent].Entries = append(h.trees[parent].Entries, te) -} - -type sortableEntries []object.TreeEntry - -func (sortableEntries) sortName(te object.TreeEntry) string { - if te.Mode == filemode.Dir { - return te.Name + "/" - } - return te.Name -} -func (se sortableEntries) Len() int { return len(se) } -func (se sortableEntries) Less(i int, j int) bool { return se.sortName(se[i]) < se.sortName(se[j]) } -func (se sortableEntries) Swap(i int, j int) { se[i], se[j] = se[j], se[i] } - -func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tree) (plumbing.Hash, error) { - sort.Sort(sortableEntries(t.Entries)) - for i, e := range t.Entries { - if e.Mode != filemode.Dir && !e.Hash.IsZero() { - continue - } - - path := path.Join(parent, e.Name) - - var err error - e.Hash, err = h.copyTreeToStorageRecursive(path, h.trees[path]) - if err != nil { - return plumbing.ZeroHash, err - } - - t.Entries[i] = e - } - - o := h.s.NewEncodedObject() - if err := t.Encode(o); err != nil { - return plumbing.ZeroHash, err - } - - hash := o.Hash() - if h.s.HasEncodedObject(hash) == nil { - return hash, nil - } - return h.s.SetEncodedObject(o) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_js.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_js.go deleted file mode 100644 index 7267d055e7a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_js.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build js - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Ctime), int64(os.CtimeNsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_linux.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_linux.go deleted file mode 100644 index cf0db25242d..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = 
time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_plan9.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_plan9.go deleted file mode 100644 index 8cedf71a32a..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_plan9.go +++ /dev/null @@ -1,31 +0,0 @@ -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Dir); ok { - // Plan 9 doesn't have a CreatedAt field. - e.CreatedAt = time.Unix(int64(os.Mtime), 0) - - e.Dev = uint32(os.Dev) - - // Plan 9 has no Inode. - // ext2srv(4) appears to store Inode in Qid.Path. - e.Inode = uint32(os.Qid.Path) - - // Plan 9 has string UID/GID - e.GID = 0 - e.UID = 0 - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return true -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_status.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_status.go deleted file mode 100644 index c639f132088..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ /dev/null @@ -1,708 +0,0 @@ -package git - -import ( - "bytes" - "errors" - "io" - "os" - "path" - "path/filepath" - "strings" - - "github.com/go-git/go-billy/v5/util" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/filemode" - "github.com/go-git/go-git/v5/plumbing/format/gitignore" - "github.com/go-git/go-git/v5/plumbing/format/index" - "github.com/go-git/go-git/v5/plumbing/object" - "github.com/go-git/go-git/v5/utils/ioutil" - "github.com/go-git/go-git/v5/utils/merkletrie" - "github.com/go-git/go-git/v5/utils/merkletrie/filesystem" - mindex "github.com/go-git/go-git/v5/utils/merkletrie/index" - "github.com/go-git/go-git/v5/utils/merkletrie/noder" -) - -var ( - // ErrDestinationExists in an Move operation means that the target exists on - // the worktree. - ErrDestinationExists = errors.New("destination exists") - // ErrGlobNoMatches in an AddGlob if the glob pattern does not match any - // files in the worktree. - ErrGlobNoMatches = errors.New("glob pattern did not match any files") -) - -// Status returns the working tree status. 
-func (w *Worktree) Status() (Status, error) { - var hash plumbing.Hash - - ref, err := w.r.Head() - if err != nil && err != plumbing.ErrReferenceNotFound { - return nil, err - } - - if err == nil { - hash = ref.Hash() - } - - return w.status(hash) -} - -func (w *Worktree) status(commit plumbing.Hash) (Status, error) { - s := make(Status) - - left, err := w.diffCommitWithStaging(commit, false) - if err != nil { - return nil, err - } - - for _, ch := range left { - a, err := ch.Action() - if err != nil { - return nil, err - } - - fs := s.File(nameFromAction(&ch)) - fs.Worktree = Unmodified - - switch a { - case merkletrie.Delete: - s.File(ch.From.String()).Staging = Deleted - case merkletrie.Insert: - s.File(ch.To.String()).Staging = Added - case merkletrie.Modify: - s.File(ch.To.String()).Staging = Modified - } - } - - right, err := w.diffStagingWithWorktree(false) - if err != nil { - return nil, err - } - - for _, ch := range right { - a, err := ch.Action() - if err != nil { - return nil, err - } - - fs := s.File(nameFromAction(&ch)) - if fs.Staging == Untracked { - fs.Staging = Unmodified - } - - switch a { - case merkletrie.Delete: - fs.Worktree = Deleted - case merkletrie.Insert: - fs.Worktree = Untracked - fs.Staging = Untracked - case merkletrie.Modify: - fs.Worktree = Modified - } - } - - return s, nil -} - -func nameFromAction(ch *merkletrie.Change) string { - name := ch.To.String() - if name == "" { - return ch.From.String() - } - - return name -} - -func (w *Worktree) diffStagingWithWorktree(reverse bool) (merkletrie.Changes, error) { - idx, err := w.r.Storer.Index() - if err != nil { - return nil, err - } - - from := mindex.NewRootNode(idx) - submodules, err := w.getSubmodulesStatus() - if err != nil { - return nil, err - } - - to := filesystem.NewRootNode(w.Filesystem, submodules) - - var c merkletrie.Changes - if reverse { - c, err = merkletrie.DiffTree(to, from, diffTreeIsEquals) - } else { - c, err = merkletrie.DiffTree(from, to, diffTreeIsEquals) - } - - if err != nil { - return nil, err - } - - return w.excludeIgnoredChanges(c), nil -} - -func (w *Worktree) excludeIgnoredChanges(changes merkletrie.Changes) merkletrie.Changes { - patterns, err := gitignore.ReadPatterns(w.Filesystem, nil) - if err != nil { - return changes - } - - patterns = append(patterns, w.Excludes...) 
- - if len(patterns) == 0 { - return changes - } - - m := gitignore.NewMatcher(patterns) - - var res merkletrie.Changes - for _, ch := range changes { - var path []string - for _, n := range ch.To { - path = append(path, n.Name()) - } - if len(path) == 0 { - for _, n := range ch.From { - path = append(path, n.Name()) - } - } - if len(path) != 0 { - isDir := (len(ch.To) > 0 && ch.To.IsDir()) || (len(ch.From) > 0 && ch.From.IsDir()) - if m.Match(path, isDir) { - continue - } - } - res = append(res, ch) - } - return res -} - -func (w *Worktree) getSubmodulesStatus() (map[string]plumbing.Hash, error) { - o := map[string]plumbing.Hash{} - - sub, err := w.Submodules() - if err != nil { - return nil, err - } - - status, err := sub.Status() - if err != nil { - return nil, err - } - - for _, s := range status { - if s.Current.IsZero() { - o[s.Path] = s.Expected - continue - } - - o[s.Path] = s.Current - } - - return o, nil -} - -func (w *Worktree) diffCommitWithStaging(commit plumbing.Hash, reverse bool) (merkletrie.Changes, error) { - var t *object.Tree - if !commit.IsZero() { - c, err := w.r.CommitObject(commit) - if err != nil { - return nil, err - } - - t, err = c.Tree() - if err != nil { - return nil, err - } - } - - return w.diffTreeWithStaging(t, reverse) -} - -func (w *Worktree) diffTreeWithStaging(t *object.Tree, reverse bool) (merkletrie.Changes, error) { - var from noder.Noder - if t != nil { - from = object.NewTreeRootNode(t) - } - - idx, err := w.r.Storer.Index() - if err != nil { - return nil, err - } - - to := mindex.NewRootNode(idx) - - if reverse { - return merkletrie.DiffTree(to, from, diffTreeIsEquals) - } - - return merkletrie.DiffTree(from, to, diffTreeIsEquals) -} - -var emptyNoderHash = make([]byte, 24) - -// diffTreeIsEquals is a implementation of noder.Equals, used to compare -// noder.Noder, it compare the content and the length of the hashes. -// -// Since some of the noder.Noder implementations doesn't compute a hash for -// some directories, if any of the hashes is a 24-byte slice of zero values -// the comparison is not done and the hashes are take as different. -func diffTreeIsEquals(a, b noder.Hasher) bool { - hashA := a.Hash() - hashB := b.Hash() - - if bytes.Equal(hashA, emptyNoderHash) || bytes.Equal(hashB, emptyNoderHash) { - return false - } - - return bytes.Equal(hashA, hashB) -} - -// Add adds the file contents of a file in the worktree to the index. if the -// file is already staged in the index no error is returned. If a file deleted -// from the Workspace is given, the file is removed from the index. If a -// directory given, adds the files and all his sub-directories recursively in -// the worktree to the index. If any of the files is already staged in the index -// no error is returned. When path is a file, the blob.Hash is returned. -func (w *Worktree) Add(path string) (plumbing.Hash, error) { - // TODO(mcuadros): deprecate in favor of AddWithOption in v6. 
- return w.doAdd(path, make([]gitignore.Pattern, 0)) -} - -func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { - files, err := w.Filesystem.ReadDir(directory) - if err != nil { - return false, err - } - if len(ignorePattern) > 0 { - m := gitignore.NewMatcher(ignorePattern) - matchPath := strings.Split(directory, string(os.PathSeparator)) - if m.Match(matchPath, true) { - // ignore - return false, nil - } - } - - for _, file := range files { - name := path.Join(directory, file.Name()) - - var a bool - if file.IsDir() { - if file.Name() == GitDirName { - // ignore special git directory - continue - } - a, err = w.doAddDirectory(idx, s, name, ignorePattern) - } else { - a, _, err = w.doAddFile(idx, s, name, ignorePattern) - } - - if err != nil { - return - } - - if !added && a { - added = true - } - } - - return -} - -// AddWithOptions file contents to the index, updates the index using the -// current content found in the working tree, to prepare the content staged for -// the next commit. -// -// It typically adds the current content of existing paths as a whole, but with -// some options it can also be used to add content with only part of the changes -// made to the working tree files applied, or remove paths that do not exist in -// the working tree anymore. -func (w *Worktree) AddWithOptions(opts *AddOptions) error { - if err := opts.Validate(w.r); err != nil { - return err - } - - if opts.All { - _, err := w.doAdd(".", w.Excludes) - return err - } - - if opts.Glob != "" { - return w.AddGlob(opts.Glob) - } - - _, err := w.Add(opts.Path) - return err -} - -func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) { - s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - var h plumbing.Hash - var added bool - - fi, err := w.Filesystem.Lstat(path) - if err != nil || !fi.IsDir() { - added, h, err = w.doAddFile(idx, s, path, ignorePattern) - } else { - added, err = w.doAddDirectory(idx, s, path, ignorePattern) - } - - if err != nil { - return h, err - } - - if !added { - return h, nil - } - - return h, w.r.Storer.SetIndex(idx) -} - -// AddGlob adds all paths, matching pattern, to the index. If pattern matches a -// directory path, all directory contents are added to the index recursively. No -// error is returned if all matching paths are already staged in index. -func (w *Worktree) AddGlob(pattern string) error { - // TODO(mcuadros): deprecate in favor of AddWithOption in v6. 
- files, err := util.Glob(w.Filesystem, pattern) - if err != nil { - return err - } - - if len(files) == 0 { - return ErrGlobNoMatches - } - - s, err := w.Status() - if err != nil { - return err - } - - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - - var saveIndex bool - for _, file := range files { - fi, err := w.Filesystem.Lstat(file) - if err != nil { - return err - } - - var added bool - if fi.IsDir() { - added, err = w.doAddDirectory(idx, s, file, make([]gitignore.Pattern, 0)) - } else { - added, _, err = w.doAddFile(idx, s, file, make([]gitignore.Pattern, 0)) - } - - if err != nil { - return err - } - - if !saveIndex && added { - saveIndex = true - } - } - - if saveIndex { - return w.r.Storer.SetIndex(idx) - } - - return nil -} - -// doAddFile create a new blob from path and update the index, added is true if -// the file added is different from the index. -func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { - return false, h, nil - } - if len(ignorePattern) > 0 { - m := gitignore.NewMatcher(ignorePattern) - matchPath := strings.Split(path, string(os.PathSeparator)) - if m.Match(matchPath, true) { - // ignore - return false, h, nil - } - } - - h, err = w.copyFileToStorage(path) - if err != nil { - if os.IsNotExist(err) { - added = true - h, err = w.deleteFromIndex(idx, path) - } - - return - } - - if err := w.addOrUpdateFileToIndex(idx, path, h); err != nil { - return false, h, err - } - - return true, h, err -} - -func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error) { - fi, err := w.Filesystem.Lstat(path) - if err != nil { - return plumbing.ZeroHash, err - } - - obj := w.r.Storer.NewEncodedObject() - obj.SetType(plumbing.BlobObject) - obj.SetSize(fi.Size()) - - writer, err := obj.Writer() - if err != nil { - return plumbing.ZeroHash, err - } - - defer ioutil.CheckClose(writer, &err) - - if fi.Mode()&os.ModeSymlink != 0 { - err = w.fillEncodedObjectFromSymlink(writer, path, fi) - } else { - err = w.fillEncodedObjectFromFile(writer, path, fi) - } - - if err != nil { - return plumbing.ZeroHash, err - } - - return w.r.Storer.SetEncodedObject(obj) -} - -func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) { - src, err := w.Filesystem.Open(path) - if err != nil { - return err - } - - defer ioutil.CheckClose(src, &err) - - if _, err := io.Copy(dst, src); err != nil { - return err - } - - return err -} - -func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error { - target, err := w.Filesystem.Readlink(path) - if err != nil { - return err - } - - _, err = dst.Write([]byte(target)) - return err -} - -func (w *Worktree) addOrUpdateFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { - e, err := idx.Entry(filename) - if err != nil && err != index.ErrEntryNotFound { - return err - } - - if err == index.ErrEntryNotFound { - return w.doAddFileToIndex(idx, filename, h) - } - - return w.doUpdateFileToIndex(e, filename, h) -} - -func (w *Worktree) doAddFileToIndex(idx *index.Index, filename string, h plumbing.Hash) error { - return w.doUpdateFileToIndex(idx.Add(filename), filename, h) -} - -func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbing.Hash) error { - info, err := w.Filesystem.Lstat(filename) - if err != nil { - return err - } - - e.Hash = h - e.ModifiedAt = 
info.ModTime() - e.Mode, err = filemode.NewFromOSFileMode(info.Mode()) - if err != nil { - return err - } - - if e.Mode.IsRegular() { - e.Size = uint32(info.Size()) - } - - fillSystemInfo(e, info.Sys()) - return nil -} - -// Remove removes files from the working tree and from the index. -func (w *Worktree) Remove(path string) (plumbing.Hash, error) { - // TODO(mcuadros): remove plumbing.Hash from signature at v5. - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - var h plumbing.Hash - - fi, err := w.Filesystem.Lstat(path) - if err != nil || !fi.IsDir() { - h, err = w.doRemoveFile(idx, path) - } else { - _, err = w.doRemoveDirectory(idx, path) - } - if err != nil { - return h, err - } - - return h, w.r.Storer.SetIndex(idx) -} - -func (w *Worktree) doRemoveDirectory(idx *index.Index, directory string) (removed bool, err error) { - files, err := w.Filesystem.ReadDir(directory) - if err != nil { - return false, err - } - - for _, file := range files { - name := path.Join(directory, file.Name()) - - var r bool - if file.IsDir() { - r, err = w.doRemoveDirectory(idx, name) - } else { - _, err = w.doRemoveFile(idx, name) - if err == index.ErrEntryNotFound { - err = nil - } - } - - if err != nil { - return - } - - if !removed && r { - removed = true - } - } - - err = w.removeEmptyDirectory(directory) - return -} - -func (w *Worktree) removeEmptyDirectory(path string) error { - files, err := w.Filesystem.ReadDir(path) - if err != nil { - return err - } - - if len(files) != 0 { - return nil - } - - return w.Filesystem.Remove(path) -} - -func (w *Worktree) doRemoveFile(idx *index.Index, path string) (plumbing.Hash, error) { - hash, err := w.deleteFromIndex(idx, path) - if err != nil { - return plumbing.ZeroHash, err - } - - return hash, w.deleteFromFilesystem(path) -} - -func (w *Worktree) deleteFromIndex(idx *index.Index, path string) (plumbing.Hash, error) { - e, err := idx.Remove(path) - if err != nil { - return plumbing.ZeroHash, err - } - - return e.Hash, nil -} - -func (w *Worktree) deleteFromFilesystem(path string) error { - err := w.Filesystem.Remove(path) - if os.IsNotExist(err) { - return nil - } - - return err -} - -// RemoveGlob removes all paths, matching pattern, from the index. If pattern -// matches a directory path, all directory contents are removed from the index -// recursively. -func (w *Worktree) RemoveGlob(pattern string) error { - idx, err := w.r.Storer.Index() - if err != nil { - return err - } - - entries, err := idx.Glob(pattern) - if err != nil { - return err - } - - for _, e := range entries { - file := filepath.FromSlash(e.Name) - if _, err := w.Filesystem.Lstat(file); err != nil && !os.IsNotExist(err) { - return err - } - - if _, err := w.doRemoveFile(idx, file); err != nil { - return err - } - - dir, _ := filepath.Split(file) - if err := w.removeEmptyDirectory(dir); err != nil { - return err - } - } - - return w.r.Storer.SetIndex(idx) -} - -// Move moves or rename a file in the worktree and the index, directories are -// not supported. 
-func (w *Worktree) Move(from, to string) (plumbing.Hash, error) { - // TODO(mcuadros): support directories and/or implement support for glob - if _, err := w.Filesystem.Lstat(from); err != nil { - return plumbing.ZeroHash, err - } - - if _, err := w.Filesystem.Lstat(to); err == nil { - return plumbing.ZeroHash, ErrDestinationExists - } - - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err - } - - hash, err := w.deleteFromIndex(idx, from) - if err != nil { - return plumbing.ZeroHash, err - } - - if err := w.Filesystem.Rename(from, to); err != nil { - return hash, err - } - - if err := w.addOrUpdateFileToIndex(idx, to, hash); err != nil { - return hash, err - } - - return hash, w.r.Storer.SetIndex(idx) -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go deleted file mode 100644 index f45966be961..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_unix_other.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build openbsd dragonfly solaris - -package git - -import ( - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Stat_t); ok { - e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec)) - e.Dev = uint32(os.Dev) - e.Inode = uint32(os.Ino) - e.GID = os.Gid - e.UID = os.Uid - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - return false -} diff --git a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_windows.go b/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_windows.go deleted file mode 100644 index 1928f9712e9..00000000000 --- a/ui/wasm/vendor/github.com/go-git/go-git/v5/worktree_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package git - -import ( - "os" - "syscall" - "time" - - "github.com/go-git/go-git/v5/plumbing/format/index" -) - -func init() { - fillSystemInfo = func(e *index.Entry, sys interface{}) { - if os, ok := sys.(*syscall.Win32FileAttributeData); ok { - seconds := os.CreationTime.Nanoseconds() / 1000000000 - nanoseconds := os.CreationTime.Nanoseconds() - seconds*1000000000 - e.CreatedAt = time.Unix(seconds, nanoseconds) - } - } -} - -func isSymlinkWindowsNonAdmin(err error) bool { - const ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 - - if err != nil { - if errLink, ok := err.(*os.LinkError); ok { - if errNo, ok := errLink.Err.(syscall.Errno); ok { - return errNo == ERROR_PRIVILEGE_NOT_HELD - } - } - } - - return false -} diff --git a/ui/wasm/vendor/github.com/go-logr/logr/LICENSE b/ui/wasm/vendor/github.com/go-logr/logr/LICENSE deleted file mode 100644 index 8dada3edaf5..00000000000 --- a/ui/wasm/vendor/github.com/go-logr/logr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
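The hunks above remove the vendored go-git v5 worktree sources (status, add/commit, grep, and the per-platform `fillSystemInfo` shims) from `ui/wasm/vendor`. As a point of reference only — this is a hedged sketch of the public API those files back, not code from this PR; the repository path, file name, and author details are invented for illustration:

```go
package main

import (
	"fmt"
	"regexp"
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	// Open an existing repository and obtain its worktree.
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Stage and commit a file; this drives the Add/Commit code paths
	// deleted above (worktree_status.go, worktree_commit.go).
	if _, err := wt.Add("README.md"); err != nil {
		panic(err)
	}
	hash, err := wt.Commit("example commit", &git.CommitOptions{
		Author: &object.Signature{
			Name:  "Example Author",
			Email: "author@example.com",
			When:  time.Now(),
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("committed", hash)

	// Search tracked files at HEAD; this drives the Grep code path
	// deleted above (Worktree.Grep, findMatchInFile).
	results, err := wt.Grep(&git.GrepOptions{
		Patterns: []*regexp.Regexp{regexp.MustCompile(`TODO`)},
	})
	if err != nil {
		panic(err)
	}
	for _, r := range results {
		fmt.Println(r.String())
	}
}
```

Dropping a vendor copy leaves this surface unchanged for callers that build against the module's own `go-git` dependency.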
diff --git a/ui/wasm/vendor/github.com/go-logr/logr/README.md b/ui/wasm/vendor/github.com/go-logr/logr/README.md deleted file mode 100644 index e9b5520a1c5..00000000000 --- a/ui/wasm/vendor/github.com/go-logr/logr/README.md +++ /dev/null @@ -1,183 +0,0 @@ -# A more minimal logging API for Go - -Before you consider this package, please read [this blog post by the -inimitable Dave Cheney][warning-makes-no-sense]. I really appreciate what -he has to say, and it largely aligns with my own experiences. Too many -choices of levels means inconsistent logs. - -This package offers a purely abstract interface, based on these ideas but with -a few twists. Code can depend on just this interface and have the actual -logging implementation be injected from callers. Ideally only `main()` knows -what logging implementation is being used. - -# Differences from Dave's ideas - -The main differences are: - -1) Dave basically proposes doing away with the notion of a logging API in favor -of `fmt.Printf()`. I disagree, especially when you consider things like output -locations, timestamps, file and line decorations, and structured logging. I -restrict the API to just 2 types of logs: info and error. - -Info logs are things you want to tell the user which are not errors. Error -logs are, well, errors. If your code receives an `error` from a subordinate -function call and is logging that `error` *and not returning it*, use error -logs. - -2) Verbosity-levels on info logs. This gives developers a chance to indicate -arbitrary grades of importance for info logs, without assigning names with -semantic meaning such as "warning", "trace", and "debug". Superficially this -may feel very similar, but the primary difference is the lack of semantics. -Because verbosity is a numerical value, it's safe to assume that an app running -with higher verbosity means more (and less important) logs will be generated. - -This is a BETA grade API. - -There are implementations for the following logging libraries: - -- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) -- **k8s.io/klog**: [klogr](https://git.k8s.io/klog/klogr) -- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) -- **log** (the Go standard library logger): - [stdr](https://github.com/go-logr/stdr) -- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) -- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) -- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) - -# FAQ - -## Conceptual - -## Why structured logging? - -- **Structured logs are more easily queriable**: Since you've got - key-value pairs, it's much easier to query your structured logs for - particular values by filtering on the contents of a particular key -- - think searching request logs for error codes, Kubernetes reconcilers for - the name and namespace of the reconciled object, etc - -- **Structured logging makes it easier to have cross-referencable logs**: - Similarly to searchability, if you maintain conventions around your - keys, it becomes easy to gather all log lines related to a particular - concept. 
- -- **Structured logs allow better dimensions of filtering**: if you have - structure to your logs, you've got more precise control over how much - information is logged -- you might choose in a particular configuration - to log certain keys but not others, only log lines where a certain key - matches a certain value, etc, instead of just having v-levels and names - to key off of. - -- **Structured logs better represent structured data**: sometimes, the - data that you want to log is inherently structured (think tuple-link - objects). Structured logs allow you to preserve that structure when - outputting. - -## Why V-levels? - -**V-levels give operators an easy way to control the chattiness of log -operations**. V-levels provide a way for a given package to distinguish -the relative importance or verbosity of a given log message. Then, if -a particular logger or package is logging too many messages, the user -of the package can simply change the v-levels for that library. - -## Why not more named levels, like Warning? - -Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences -from Dave's ideas](#differences-from-daves-ideas). - -## Why not allow format strings, too? - -**Format strings negate many of the benefits of structured logs**: - -- They're not easily searchable without resorting to fuzzy searching, - regular expressions, etc - -- They don't store structured data well, since contents are flattened into - a string - -- They're not cross-referencable - -- They don't compress easily, since the message is not constant - -(unless you turn positional parameters into key-value pairs with numerical -keys, at which point you've gotten key-value logging with meaningless -keys) - -## Practical - -## Why key-value pairs, and not a map? - -Key-value pairs are *much* easier to optimize, especially around -allocations. Zap (a structured logger that inspired logr's interface) has -[performance measurements](https://github.com/uber-go/zap#performance) -that show this quite nicely. - -While the interface ends up being a little less obvious, you get -potentially better performance, plus avoid making users type -`map[string]string{}` every time they want to log. - -## What if my V-levels differ between libraries? - -That's fine. Control your V-levels on a per-logger basis, and use the -`WithName` function to pass different loggers to different libraries. - -Generally, you should take care to ensure that you have relatively -consistent V-levels within a given logger, however, as this makes deciding -on what verbosity of logs to request easier. - -## But I *really* want to use a format string! - -That's not actually a question. Assuming your question is "how do -I convert my mental model of logging with format strings to logging with -constant messages": - -1. figure out what the error actually is, as you'd write in a TL;DR style, - and use that as a message - -2. 
For every place you'd write a format specifier, look to the word before - it, and add that as a key value pair - -For instance, consider the following examples (all taken from spots in the -Kubernetes codebase): - -- `klog.V(4).Infof("Client is returning errors: code %v, error %v", - responseCode, err)` becomes `logger.Error(err, "client returned an - error", "code", responseCode)` - -- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", - seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after - response when requesting url", "attempt", retries, "after - seconds", seconds, "url", url)` - -If you *really* must use a format string, place it as a key value, and -call `fmt.Sprintf` yourself -- for instance, `log.Printf("unable to -reflect over type %T")` becomes `logger.Info("unable to reflect over -type", "type", fmt.Sprintf("%T"))`. In general though, the cases where -this is necessary should be few and far between. - -## How do I choose my V-levels? - -This is basically the only hard constraint: increase V-levels to denote -more verbose or more debug-y logs. - -Otherwise, you can start out with `0` as "you always want to see this", -`1` as "common logging that you might *possibly* want to turn off", and -`10` as "I would like to performance-test your log collection stack". - -Then gradually choose levels in between as you need them, working your way -down from 10 (for debug and trace style logs) and up from 1 (for chattier -info-type logs). - -## How do I choose my keys - -- make your keys human-readable -- constant keys are generally a good idea -- be consistent across your codebase -- keys should naturally match parts of the message string - -While key names are mostly unrestricted (and spaces are acceptable), -it's generally a good idea to stick to printable ascii characters, or at -least match the general character set of your log lines. - -[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/ui/wasm/vendor/github.com/go-logr/logr/discard.go b/ui/wasm/vendor/github.com/go-logr/logr/discard.go deleted file mode 100644 index 2bafb13d15c..00000000000 --- a/ui/wasm/vendor/github.com/go-logr/logr/discard.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2020 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package logr - -// Discard returns a valid Logger that discards all messages logged to it. -// It can be used whenever the caller is not interested in the logs. -func Discard() Logger { - return DiscardLogger{} -} - -// DiscardLogger is a Logger that discards all messages. 
-type DiscardLogger struct{} - -func (l DiscardLogger) Enabled() bool { - return false -} - -func (l DiscardLogger) Info(msg string, keysAndValues ...interface{}) { -} - -func (l DiscardLogger) Error(err error, msg string, keysAndValues ...interface{}) { -} - -func (l DiscardLogger) V(level int) Logger { - return l -} - -func (l DiscardLogger) WithValues(keysAndValues ...interface{}) Logger { - return l -} - -func (l DiscardLogger) WithName(name string) Logger { - return l -} - -// Verify that it actually implements the interface -var _ Logger = DiscardLogger{} diff --git a/ui/wasm/vendor/github.com/go-logr/logr/logr.go b/ui/wasm/vendor/github.com/go-logr/logr/logr.go deleted file mode 100644 index 842428bd3a3..00000000000 --- a/ui/wasm/vendor/github.com/go-logr/logr/logr.go +++ /dev/null @@ -1,266 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This design derives from Dave Cheney's blog: -// http://dave.cheney.net/2015/11/05/lets-talk-about-logging -// -// This is a BETA grade API. Until there is a significant 2nd implementation, -// I don't really know how it will change. - -// Package logr defines abstract interfaces for logging. Packages can depend on -// these interfaces and callers can implement logging in whatever way is -// appropriate. -// -// Usage -// -// Logging is done using a Logger. Loggers can have name prefixes and named -// values attached, so that all log messages logged with that Logger have some -// base context associated. -// -// The term "key" is used to refer to the name associated with a particular -// value, to disambiguate it from the general Logger name. -// -// For instance, suppose we're trying to reconcile the state of an object, and -// we want to log that we've made some decision. -// -// With the traditional log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) -// -// With logr's structured logging, we'd write: -// // elsewhere in the file, set up the logger to log with the prefix of -// // "reconcilers", and the named value target-type=Foo, for extra context. -// log := mainLogger.WithName("reconcilers").WithValues("target-type", "Foo") -// -// // later on... -// log.Info("setting foo on object", "value", targetValue, "object", object) -// -// Depending on our logging implementation, we could then make logging decisions -// based on field values (like only logging such events for objects in a certain -// namespace), or copy the structured information into a structured log store. -// -// For logging errors, Logger has a method called Error. Suppose we wanted to -// log an error while reconciling. 
With the traditional log package, we might -// write: -// log.Errorf("unable to reconcile object %s/%s: %v", object.Namespace, object.Name, err) -// -// With logr, we'd instead write: -// // assuming the above setup for log -// log.Error(err, "unable to reconcile object", "object", object) -// -// This functions similarly to: -// log.Info("unable to reconcile object", "error", err, "object", object) -// -// However, it ensures that a standard key for the error value ("error") is used -// across all error logging. Furthermore, certain implementations may choose to -// attach additional information (such as stack traces) on calls to Error, so -// it's preferred to use Error to log errors. -// -// Parts of a log line -// -// Each log message from a Logger has four types of context: -// logger name, log verbosity, log message, and the named values. -// -// The Logger name consists of a series of name "segments" added by successive -// calls to WithName. These name segments will be joined in some way by the -// underlying implementation. It is strongly recommended that name segments -// contain simple identifiers (letters, digits, and hyphen), and do not contain -// characters that could muddle the log output or confuse the joining operation -// (e.g. whitespace, commas, periods, slashes, brackets, quotes, etc). -// -// Log verbosity represents how little a log matters. Level zero, the default, -// matters most. Increasing levels matter less and less. Try to avoid lots of -// different verbosity levels, and instead provide useful keys, logger names, -// and log messages for users to filter on. It's illegal to pass a log level -// below zero. -// -// The log message consists of a constant message attached to the log line. -// This should generally be a simple description of what's occurring, and should -// never be a format string. -// -// Variable information can then be attached using named values (key/value -// pairs). Keys are arbitrary strings, while values may be any Go value. -// -// Key Naming Conventions -// -// Keys are not strictly required to conform to any specification or regex, but -// it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// -// These guidelines help ensure that log data is processed properly regardless -// of the log implementation. For example, log implementations will try to -// output JSON data or will store data for later database (e.g. SQL) queries. -// -// While users are generally free to use key names of their choice, it's -// generally best to avoid using the following keys, as they're frequently used -// by implementations: -// -// * `"caller"`: the calling information (file/line) of a particular log line. -// * `"error"`: the underlying error value in the `Error` method. -// * `"level"`: the log level. -// * `"logger"`: the name of the associated logger. -// * `"msg"`: the log message. -// * `"stacktrace"`: the stack trace associated with a particular log line or -// error (often from the `Error` message). -// * `"ts"`: the timestamp for a log line. -// -// Implementations are encouraged to make use of these keys to represent the -// above concepts, when necessary (for example, in a pure-JSON output form, it -// would be necessary to represent at least message and timestamp as ordinary -// named values). 
-// -// Implementations may choose to give callers access to the underlying -// logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } -package logr - -import ( - "context" -) - -// TODO: consider adding back in format strings if they're really needed -// TODO: consider other bits of zap/zapcore functionality like ObjectMarshaller (for arbitrary objects) -// TODO: consider other bits of glog functionality like Flush, OutputStats - -// Logger represents the ability to log messages, both errors and not. -type Logger interface { - // Enabled tests whether this Logger is enabled. For example, commandline - // flags might be used to set the logging verbosity and disable some info - // logs. - Enabled() bool - - // Info logs a non-error message with the given key/value pairs as context. - // - // The msg argument should be used to add some constant description to - // the log line. The key/value pairs can then be used to add additional - // variable information. The key/value pairs should alternate string - // keys and arbitrary values. - Info(msg string, keysAndValues ...interface{}) - - // Error logs an error, with the given message and key/value pairs as context. - // It functions similarly to calling Info with the "error" named value, but may - // have unique behavior, and should be preferred for logging errors (see the - // package documentations for more information). - // - // The msg field should be used to add context to any underlying error, - // while the err field should be used to attach the actual error that - // triggered this log line, if present. - Error(err error, msg string, keysAndValues ...interface{}) - - // V returns an Logger value for a specific verbosity level, relative to - // this Logger. In other words, V values are additive. V higher verbosity - // level means a log message is less important. It's illegal to pass a log - // level less than zero. - V(level int) Logger - - // WithValues adds some key-value pairs of context to a logger. - // See Info for documentation on how key/value pairs work. - WithValues(keysAndValues ...interface{}) Logger - - // WithName adds a new element to the logger's name. - // Successive calls with WithName continue to append - // suffixes to the logger's name. It's strongly recommended - // that name segments contain only letters, digits, and hyphens - // (see the package documentation for more information). - WithName(name string) Logger -} - -// InfoLogger provides compatibility with code that relies on the v0.1.0 -// interface. -// -// Deprecated: InfoLogger is an artifact of early versions of this API. New -// users should never use it and existing users should use Logger instead. This -// will be removed in a future release. -type InfoLogger = Logger - -type contextKey struct{} - -// FromContext returns a Logger constructed from ctx or nil if no -// logger details are found. -func FromContext(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return nil -} - -// FromContextOrDiscard returns a Logger constructed from ctx or a Logger -// that discards all messages if no logger details are found. 
-func FromContextOrDiscard(ctx context.Context) Logger { - if v, ok := ctx.Value(contextKey{}).(Logger); ok { - return v - } - - return Discard() -} - -// NewContext returns a new context derived from ctx that embeds the Logger. -func NewContext(ctx context.Context, l Logger) context.Context { - return context.WithValue(ctx, contextKey{}, l) -} - -// CallDepthLogger represents a Logger that knows how to climb the call stack -// to identify the original call site and can offset the depth by a specified -// number of frames. This is useful for users who have helper functions -// between the "real" call site and the actual calls to Logger methods. -// Implementations that log information about the call site (such as file, -// function, or line) would otherwise log information about the intermediate -// helper functions. -// -// This is an optional interface and implementations are not required to -// support it. -type CallDepthLogger interface { - Logger - - // WithCallDepth returns a Logger that will offset the call stack by the - // specified number of frames when logging call site information. If depth - // is 0 the attribution should be to the direct caller of this method. If - // depth is 1 the attribution should skip 1 call frame, and so on. - // Successive calls to this are additive. - WithCallDepth(depth int) Logger -} - -// WithCallDepth returns a Logger that will offset the call stack by the -// specified number of frames when logging call site information, if possible. -// This is useful for users who have helper functions between the "real" call -// site and the actual calls to Logger methods. If depth is 0 the attribution -// should be to the direct caller of this function. If depth is 1 the -// attribution should skip 1 call frame, and so on. Successive calls to this -// are additive. -// -// If the underlying log implementation supports the CallDepthLogger interface, -// the WithCallDepth method will be called and the result returned. If the -// implementation does not support CallDepthLogger, the original Logger will be -// returned. -// -// Callers which care about whether this was supported or not should test for -// CallDepthLogger support themselves. -func WithCallDepth(logger Logger, depth int) Logger { - if decorator, ok := logger.(CallDepthLogger); ok { - return decorator.WithCallDepth(depth) - } - return logger -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/.gitignore b/ui/wasm/vendor/github.com/gobwas/glob/.gitignore deleted file mode 100644 index b4ae623be55..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -glob.iml -.idea -*.cpu -*.mem -*.test -*.dot -*.png -*.svg diff --git a/ui/wasm/vendor/github.com/gobwas/glob/.travis.yml b/ui/wasm/vendor/github.com/gobwas/glob/.travis.yml deleted file mode 100644 index e8a276826cf..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -sudo: false - -language: go - -go: - - 1.5.3 - -script: - - go test -v ./... 
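The go-logr/logr removal above covers the interface-era (pre-v1.0) API: `Logger` is an interface with `Info`, `Error`, `V`, `WithValues`, and `WithName`, plus the `Discard` and context helpers. A minimal sketch of the structured-logging style its deleted package docs describe; the `reconcile` function and its key/value pairs are invented for illustration:

```go
package main

import (
	"github.com/go-logr/logr"
)

// reconcile logs with constant messages plus key/value pairs, the style
// recommended by the deleted package documentation.
func reconcile(log logr.Logger) {
	log = log.WithName("reconcilers").WithValues("target-type", "Foo")

	// Verbosity 0: always interesting to the user.
	log.Info("setting foo on object", "value", 42, "object", "default/my-object")

	// Higher V-levels matter less; implementations may filter them out.
	log.V(4).Info("got a retry-after response when requesting url",
		"attempt", 3, "after seconds", 10, "url", "https://example.com")
}

func main() {
	// Discard() satisfies the interface and drops every message; a real
	// caller would inject an implementation such as zapr, glogr, or stdr.
	reconcile(logr.Discard())
}
```

Because only `main()` picks the concrete implementation, library code like `reconcile` stays decoupled from any particular logging backend.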
diff --git a/ui/wasm/vendor/github.com/gobwas/glob/LICENSE b/ui/wasm/vendor/github.com/gobwas/glob/LICENSE deleted file mode 100644 index 9d4735cad9f..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Sergey Kamardin - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/gobwas/glob/bench.sh b/ui/wasm/vendor/github.com/gobwas/glob/bench.sh deleted file mode 100644 index 804cf22e646..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/bench.sh +++ /dev/null @@ -1,26 +0,0 @@ -#! /bin/bash - -bench() { - filename="/tmp/$1-$2.bench" - if test -e "${filename}"; - then - echo "Already exists ${filename}" - else - backup=`git rev-parse --abbrev-ref HEAD` - git checkout $1 - echo -n "Creating ${filename}... " - go test ./... 
-run=NONE -bench=$2 > "${filename}" -benchmem - echo "OK" - git checkout ${backup} - sleep 5 - fi -} - - -to=$1 -current=`git rev-parse --abbrev-ref HEAD` - -bench ${to} $2 -bench ${current} $2 - -benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/ui/wasm/vendor/github.com/gobwas/glob/compiler/compiler.go b/ui/wasm/vendor/github.com/gobwas/glob/compiler/compiler.go deleted file mode 100644 index 02e7de80a0b..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/compiler/compiler.go +++ /dev/null @@ -1,525 +0,0 @@ -package compiler - -// TODO use constructor with all matchers, and to their structs private -// TODO glue multiple Text nodes (like after QuoteMeta) - -import ( - "fmt" - "reflect" - - "github.com/gobwas/glob/match" - "github.com/gobwas/glob/syntax/ast" - "github.com/gobwas/glob/util/runes" -) - -func optimizeMatcher(matcher match.Matcher) match.Matcher { - switch m := matcher.(type) { - - case match.Any: - if len(m.Separators) == 0 { - return match.NewSuper() - } - - case match.AnyOf: - if len(m.Matchers) == 1 { - return m.Matchers[0] - } - - return m - - case match.List: - if m.Not == false && len(m.List) == 1 { - return match.NewText(string(m.List)) - } - - return m - - case match.BTree: - m.Left = optimizeMatcher(m.Left) - m.Right = optimizeMatcher(m.Right) - - r, ok := m.Value.(match.Text) - if !ok { - return m - } - - var ( - leftNil = m.Left == nil - rightNil = m.Right == nil - ) - if leftNil && rightNil { - return match.NewText(r.Str) - } - - _, leftSuper := m.Left.(match.Super) - lp, leftPrefix := m.Left.(match.Prefix) - la, leftAny := m.Left.(match.Any) - - _, rightSuper := m.Right.(match.Super) - rs, rightSuffix := m.Right.(match.Suffix) - ra, rightAny := m.Right.(match.Any) - - switch { - case leftSuper && rightSuper: - return match.NewContains(r.Str, false) - - case leftSuper && rightNil: - return match.NewSuffix(r.Str) - - case rightSuper && leftNil: - return match.NewPrefix(r.Str) - - case leftNil && rightSuffix: - return match.NewPrefixSuffix(r.Str, rs.Suffix) - - case rightNil && leftPrefix: - return match.NewPrefixSuffix(lp.Prefix, r.Str) - - case rightNil && leftAny: - return match.NewSuffixAny(r.Str, la.Separators) - - case leftNil && rightAny: - return match.NewPrefixAny(r.Str, ra.Separators) - } - - return m - } - - return matcher -} - -func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { - if len(matchers) == 0 { - return nil, fmt.Errorf("compile error: need at least one matcher") - } - if len(matchers) == 1 { - return matchers[0], nil - } - if m := glueMatchers(matchers); m != nil { - return m, nil - } - - idx := -1 - maxLen := -1 - var val match.Matcher - for i, matcher := range matchers { - if l := matcher.Len(); l != -1 && l >= maxLen { - maxLen = l - idx = i - val = matcher - } - } - - if val == nil { // not found matcher with static length - r, err := compileMatchers(matchers[1:]) - if err != nil { - return nil, err - } - return match.NewBTree(matchers[0], nil, r), nil - } - - left := matchers[:idx] - var right []match.Matcher - if len(matchers) > idx+1 { - right = matchers[idx+1:] - } - - var l, r match.Matcher - var err error - if len(left) > 0 { - l, err = compileMatchers(left) - if err != nil { - return nil, err - } - } - - if len(right) > 0 { - r, err = compileMatchers(right) - if err != nil { - return nil, err - } - } - - return match.NewBTree(val, l, r), nil -} - -func glueMatchers(matchers []match.Matcher) match.Matcher { - if m := glueMatchersAsEvery(matchers); m != nil { - return m - } - if m := 
glueMatchersAsRow(matchers); m != nil { - return m - } - return nil -} - -func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { - if len(matchers) <= 1 { - return nil - } - - var ( - c []match.Matcher - l int - ) - for _, matcher := range matchers { - if ml := matcher.Len(); ml == -1 { - return nil - } else { - c = append(c, matcher) - l += ml - } - } - return match.NewRow(l, c...) -} - -func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { - if len(matchers) <= 1 { - return nil - } - - var ( - hasAny bool - hasSuper bool - hasSingle bool - min int - separator []rune - ) - - for i, matcher := range matchers { - var sep []rune - - switch m := matcher.(type) { - case match.Super: - sep = []rune{} - hasSuper = true - - case match.Any: - sep = m.Separators - hasAny = true - - case match.Single: - sep = m.Separators - hasSingle = true - min++ - - case match.List: - if !m.Not { - return nil - } - sep = m.List - hasSingle = true - min++ - - default: - return nil - } - - // initialize - if i == 0 { - separator = sep - } - - if runes.Equal(sep, separator) { - continue - } - - return nil - } - - if hasSuper && !hasAny && !hasSingle { - return match.NewSuper() - } - - if hasAny && !hasSuper && !hasSingle { - return match.NewAny(separator) - } - - if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { - return match.NewMin(min) - } - - every := match.NewEveryOf() - - if min > 0 { - every.Add(match.NewMin(min)) - - if !hasAny && !hasSuper { - every.Add(match.NewMax(min)) - } - } - - if len(separator) > 0 { - every.Add(match.NewContains(string(separator), true)) - } - - return every -} - -func minimizeMatchers(matchers []match.Matcher) []match.Matcher { - var done match.Matcher - var left, right, count int - - for l := 0; l < len(matchers); l++ { - for r := len(matchers); r > l; r-- { - if glued := glueMatchers(matchers[l:r]); glued != nil { - var swap bool - - if done == nil { - swap = true - } else { - cl, gl := done.Len(), glued.Len() - swap = cl > -1 && gl > -1 && gl > cl - swap = swap || count < r-l - } - - if swap { - done = glued - left = l - right = r - count = r - l - } - } - } - } - - if done == nil { - return matchers - } - - next := append(append([]match.Matcher{}, matchers[:left]...), done) - if right < len(matchers) { - next = append(next, matchers[right:]...) 
- } - - if len(next) == len(matchers) { - return next - } - - return minimizeMatchers(next) -} - -// minimizeTree tries to apply some heuristics to minimize the number of nodes in the given tree -func minimizeTree(tree *ast.Node) *ast.Node { - switch tree.Kind { - case ast.KindAnyOf: - return minimizeTreeAnyOf(tree) - default: - return nil - } -} - -// minimizeTreeAnyOf tries to find common children of the given AnyOf node. -// It searches for common children from the left and from the right; -// if any common children are found, it returns a new optimized ast tree, -// else it returns nil. -func minimizeTreeAnyOf(tree *ast.Node) *ast.Node { - if !areOfSameKind(tree.Children, ast.KindPattern) { - return nil - } - - commonLeft, commonRight := commonChildren(tree.Children) - commonLeftCount, commonRightCount := len(commonLeft), len(commonRight) - if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts - return nil - } - - var result []*ast.Node - if commonLeftCount > 0 { - result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...)) - } - - var anyOf []*ast.Node - for _, child := range tree.Children { - reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount] - var node *ast.Node - if len(reuse) == 0 { - // this pattern is completely reduced by the commonLeft and commonRight patterns - // so it becomes nothing - node = ast.NewNode(ast.KindNothing, nil) - } else { - node = ast.NewNode(ast.KindPattern, nil, reuse...) - } - anyOf = appendIfUnique(anyOf, node) - } - switch { - case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing: - result = append(result, anyOf[0]) - case len(anyOf) > 1: - result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...)) - } - - if commonRightCount > 0 { - result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...)) - } - - return ast.NewNode(ast.KindPattern, nil, result...)
-} - -func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { - if len(nodes) <= 1 { - return - } - - // find node that has least number of children - idx := leastChildren(nodes) - if idx == -1 { - return - } - tree := nodes[idx] - treeLength := len(tree.Children) - - // allocate max able size for rightCommon slice - // to get ability insert elements in reverse order (from end to start) - // without sorting - commonRight = make([]*ast.Node, treeLength) - lastRight := treeLength // will use this to get results as commonRight[lastRight:] - - var ( - breakLeft bool - breakRight bool - commonTotal int - ) - for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { - treeLeft := tree.Children[i] - treeRight := tree.Children[j] - - for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { - // skip least children node - if k == idx { - continue - } - - restLeft := nodes[k].Children[i] - restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] - - breakLeft = breakLeft || !treeLeft.Equal(restLeft) - - // disable searching for right common parts, if left part is already overlapping - breakRight = breakRight || (!breakLeft && j <= i) - breakRight = breakRight || !treeRight.Equal(restRight) - } - - if !breakLeft { - commonTotal++ - commonLeft = append(commonLeft, treeLeft) - } - if !breakRight { - commonTotal++ - lastRight = j - commonRight[j] = treeRight - } - } - - commonRight = commonRight[lastRight:] - - return -} - -func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { - for _, n := range target { - if reflect.DeepEqual(n, val) { - return target - } - } - return append(target, val) -} - -func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { - for _, n := range nodes { - if n.Kind != kind { - return false - } - } - return true -} - -func leastChildren(nodes []*ast.Node) int { - min := -1 - idx := -1 - for i, n := range nodes { - if idx == -1 || (len(n.Children) < min) { - min = len(n.Children) - idx = i - } - } - return idx -} - -func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { - var matchers []match.Matcher - for _, desc := range tree.Children { - m, err := compile(desc, sep) - if err != nil { - return nil, err - } - matchers = append(matchers, optimizeMatcher(m)) - } - return matchers, nil -} - -func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { - switch tree.Kind { - case ast.KindAnyOf: - // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) - if n := minimizeTree(tree); n != nil { - return compile(n, sep) - } - matchers, err := compileTreeChildren(tree, sep) - if err != nil { - return nil, err - } - return match.NewAnyOf(matchers...), nil - - case ast.KindPattern: - if len(tree.Children) == 0 { - return match.NewNothing(), nil - } - matchers, err := compileTreeChildren(tree, sep) - if err != nil { - return nil, err - } - m, err = compileMatchers(minimizeMatchers(matchers)) - if err != nil { - return nil, err - } - - case ast.KindAny: - m = match.NewAny(sep) - - case ast.KindSuper: - m = match.NewSuper() - - case ast.KindSingle: - m = match.NewSingle(sep) - - case ast.KindNothing: - m = match.NewNothing() - - case ast.KindList: - l := tree.Value.(ast.List) - m = match.NewList([]rune(l.Chars), l.Not) - - case ast.KindRange: - r := tree.Value.(ast.Range) - m = match.NewRange(r.Lo, r.Hi, r.Not) - - case ast.KindText: - t := tree.Value.(ast.Text) - m = match.NewText(t.Text) - - default: - return nil, 
fmt.Errorf("could not compile tree: unknown node type") - } - - return optimizeMatcher(m), nil -} - -func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { - m, err := compile(tree, sep) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/glob.go b/ui/wasm/vendor/github.com/gobwas/glob/glob.go deleted file mode 100644 index 2afde343af8..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/glob.go +++ /dev/null @@ -1,80 +0,0 @@ -package glob - -import ( - "github.com/gobwas/glob/compiler" - "github.com/gobwas/glob/syntax" -) - -// Glob represents a compiled glob pattern. -type Glob interface { - Match(string) bool -} - -// Compile creates a Glob for the given pattern, treating any runes passed after the pattern as separators. -// The pattern syntax is: -// -// pattern: -// { term } -// -// term: -// `*` matches any sequence of non-separator characters -// `**` matches any sequence of characters -// `?` matches any single non-separator character -// `[` [ `!` ] { character-range } `]` -// character class (must be non-empty) -// `{` pattern-list `}` -// pattern alternatives -// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) -// `\` c matches character c -// -// character-range: -// c matches character c (c != `\\`, `-`, `]`) -// `\` c matches character c -// lo `-` hi matches character c for lo <= c <= hi -// -// pattern-list: -// pattern { `,` pattern } -// comma-separated (without spaces) patterns -// -func Compile(pattern string, separators ...rune) (Glob, error) { - ast, err := syntax.Parse(pattern) - if err != nil { - return nil, err - } - - matcher, err := compiler.Compile(ast, separators) - if err != nil { - return nil, err - } - - return matcher, nil -} - -// MustCompile is the same as Compile, except that if Compile returns an error, this will panic -func MustCompile(pattern string, separators ...rune) Glob { - g, err := Compile(pattern, separators...) - if err != nil { - panic(err) - } - - return g } - -// QuoteMeta returns a string that quotes all glob pattern meta characters -// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\{foo\*\}`. -func QuoteMeta(s string) string { - b := make([]byte, 2*len(s)) - - // a byte loop is correct because all meta characters are ASCII - j := 0 - for i := 0; i < len(s); i++ { - if syntax.Special(s[i]) { - b[j] = '\\' - j++ - } - b[j] = s[i] - j++ - } - - return string(b[0:j]) -}
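The file above is the package's entire public surface (`Glob`, `Compile`, `MustCompile`, `QuoteMeta`). For reviewers tracing this vendor removal, a minimal usage sketch of that API follows; the pattern and inputs are illustrative, echoing the library's own readme:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Compile once and reuse: with '.' registered as a separator,
	// '*' will not cross dots, so the subdomain depth is pinned.
	g := glob.MustCompile("api.*.com", '.')
	fmt.Println(g.Match("api.github.com")) // true
	fmt.Println(g.Match("api.gi.hub.com")) // false

	// QuoteMeta escapes meta characters so untrusted input matches literally.
	lit := glob.MustCompile(glob.QuoteMeta("{foo*}"))
	fmt.Println(lit.Match("{foo*}")) // true
}
```

Hoisting the compiled `Glob` out of hot loops is the intended pattern; as the readme benchmarks further down show, recompiling on every match is far slower.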
diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/any.go b/ui/wasm/vendor/github.com/gobwas/glob/match/any.go deleted file mode 100644 index 514a9a5c450..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/any.go +++ /dev/null @@ -1,45 +0,0 @@ -package match - -import ( - "fmt" - "github.com/gobwas/glob/util/strings" -) - -type Any struct { - Separators []rune -} - -func NewAny(s []rune) Any { - return Any{s} -} - -func (self Any) Match(s string) bool { - return strings.IndexAnyRunes(s, self.Separators) == -1 -} - -func (self Any) Index(s string) (int, []int) { - found := strings.IndexAnyRunes(s, self.Separators) - switch found { - case -1: - case 0: - return 0, segments0 - default: - s = s[:found] - } - - segments := acquireSegments(len(s)) - for i := range s { - segments = append(segments, i) - } - segments = append(segments, len(s)) - - return 0, segments -} - -func (self Any) Len() int { - return lenNo -} - -func (self Any) String() string { - return fmt.Sprintf("<any:![%s]>", string(self.Separators)) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/any_of.go b/ui/wasm/vendor/github.com/gobwas/glob/match/any_of.go deleted file mode 100644 index 8e65356cdc9..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/any_of.go +++ /dev/null @@ -1,82 +0,0 @@ -package match - -import "fmt" - -type AnyOf struct { - Matchers Matchers -} - -func NewAnyOf(m ...Matcher) AnyOf { - return AnyOf{Matchers(m)} -} - -func (self *AnyOf) Add(m Matcher) error { - self.Matchers = append(self.Matchers, m) - return nil -} - -func (self AnyOf) Match(s string) bool { - for _, m := range self.Matchers { - if m.Match(s) { - return true - } - } - - return false -} - -func (self AnyOf) Index(s string) (int, []int) { - index := -1 - - segments := acquireSegments(len(s)) - for _, m := range self.Matchers { - idx, seg := m.Index(s) - if idx == -1 { - continue - } - - if index == -1 || idx < index { - index = idx - segments = append(segments[:0], seg...)
- continue - } - - if idx > index { - continue - } - - // here idx == index - segments = appendMerge(segments, seg) - } - - if index == -1 { - releaseSegments(segments) - return -1, nil - } - - return index, segments -} - -func (self AnyOf) Len() (l int) { - l = -1 - for _, m := range self.Matchers { - ml := m.Len() - switch { - case l == -1: - l = ml - continue - - case ml == -1: - return -1 - - case l != ml: - return -1 - } - } - - return -} - -func (self AnyOf) String() string { - return fmt.Sprintf("<any_of:[%s]>", self.Matchers) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/btree.go b/ui/wasm/vendor/github.com/gobwas/glob/match/btree.go deleted file mode 100644 index a8130e93eae..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/btree.go +++ /dev/null @@ -1,146 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type BTree struct { - Value Matcher - Left Matcher - Right Matcher - ValueLengthRunes int - LeftLengthRunes int - RightLengthRunes int - LengthRunes int -} - -func NewBTree(Value, Left, Right Matcher) (tree BTree) { - tree.Value = Value - tree.Left = Left - tree.Right = Right - - lenOk := true - if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { - lenOk = false - } - - if Left != nil { - if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { - lenOk = false - } - } - - if Right != nil { - if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { - lenOk = false - } - } - - if lenOk { - tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes - } else { - tree.LengthRunes = -1 - } - - return tree -} - -func (self BTree) Len() int { - return self.LengthRunes -} - -// todo? -func (self BTree) Index(s string) (int, []int) { - return -1, nil -} - -func (self BTree) Match(s string) bool { - inputLen := len(s) - - // self.Length, self.RLen and self.LLen are values meaning the length of runes for each part - // here we manipulating byte length for better optimizations - // but these checks still works, cause minLen of 1-rune string is 1 byte.
- if self.LengthRunes != -1 && self.LengthRunes > inputLen { - return false - } - - // try to cut unnecessary parts - // by knowledge of length of right and left part - var offset, limit int - if self.LeftLengthRunes >= 0 { - offset = self.LeftLengthRunes - } - if self.RightLengthRunes >= 0 { - limit = inputLen - self.RightLengthRunes - } else { - limit = inputLen - } - - for offset < limit { - // search for matching part in substring - index, segments := self.Value.Index(s[offset:limit]) - if index == -1 { - releaseSegments(segments) - return false - } - - l := s[:offset+index] - var left bool - if self.Left != nil { - left = self.Left.Match(l) - } else { - left = l == "" - } - - if left { - for i := len(segments) - 1; i >= 0; i-- { - length := segments[i] - - var right bool - var r string - // if there is no string for the right branch - if inputLen <= offset+index+length { - r = "" - } else { - r = s[offset+index+length:] - } - - if self.Right != nil { - right = self.Right.Match(r) - } else { - right = r == "" - } - - if right { - releaseSegments(segments) - return true - } - } - } - - _, step := utf8.DecodeRuneInString(s[offset+index:]) - offset += index + step - - releaseSegments(segments) - } - - return false -} - -func (self BTree) String() string { - const n string = "<nil>" - var l, r string - if self.Left == nil { - l = n - } else { - l = self.Left.String() - } - if self.Right == nil { - r = n - } else { - r = self.Right.String() - } - - return fmt.Sprintf("<btree:[%s<-%s->%s]>", l, self.Value, r) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/contains.go b/ui/wasm/vendor/github.com/gobwas/glob/match/contains.go deleted file mode 100644 index 0998e95b0ea..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/contains.go +++ /dev/null @@ -1,58 +0,0 @@ -package match - -import ( - "fmt" - "strings" -) - -type Contains struct { - Needle string - Not bool -} - -func NewContains(needle string, not bool) Contains { - return Contains{needle, not} -} - -func (self Contains) Match(s string) bool { - return strings.Contains(s, self.Needle) != self.Not -} - -func (self Contains) Index(s string) (int, []int) { - var offset int - - idx := strings.Index(s, self.Needle) - - if !self.Not { - if idx == -1 { - return -1, nil - } - - offset = idx + len(self.Needle) - if len(s) <= offset { - return 0, []int{offset} - } - s = s[offset:] - } else if idx != -1 { - s = s[:idx] - } - - segments := acquireSegments(len(s) + 1) - for i := range s { - segments = append(segments, offset+i) - } - - return 0, append(segments, offset+len(s)) -} - -func (self Contains) Len() int { - return lenNo -} - -func (self Contains) String() string { - var not string - if self.Not { - not = "!"
- } - return fmt.Sprintf("<contains:[%s%s]>", not, self.Needle) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/every_of.go b/ui/wasm/vendor/github.com/gobwas/glob/match/every_of.go deleted file mode 100644 index 7c968ee368b..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/every_of.go +++ /dev/null @@ -1,99 +0,0 @@ -package match - -import ( - "fmt" -) - -type EveryOf struct { - Matchers Matchers -} - -func NewEveryOf(m ...Matcher) EveryOf { - return EveryOf{Matchers(m)} -} - -func (self *EveryOf) Add(m Matcher) error { - self.Matchers = append(self.Matchers, m) - return nil -} - -func (self EveryOf) Len() (l int) { - for _, m := range self.Matchers { - if ml := m.Len(); l > 0 { - l += ml - } else { - return -1 - } - } - - return -} - -func (self EveryOf) Index(s string) (int, []int) { - var index int - var offset int - - // make `in` with cap as len(s), - // cause it is the maximum size of output segments values - next := acquireSegments(len(s)) - current := acquireSegments(len(s)) - - sub := s - for i, m := range self.Matchers { - idx, seg := m.Index(sub) - if idx == -1 { - releaseSegments(next) - releaseSegments(current) - return -1, nil - } - - if i == 0 { - // we use copy here instead of `current = seg` - // cause seg is a slice from reusable buffer `in` - // and it could be overwritten in next iteration - current = append(current, seg...) - } else { - // clear the next - next = next[:0] - - delta := index - (idx + offset) - for _, ex := range current { - for _, n := range seg { - if ex+delta == n { - next = append(next, n) - } - } - } - - if len(next) == 0 { - releaseSegments(next) - releaseSegments(current) - return -1, nil - } - - current = append(current[:0], next...) - } - - index = idx + offset - sub = s[index:] - offset += idx - } - - releaseSegments(next) - - return index, current -} - -func (self EveryOf) Match(s string) bool { - for _, m := range self.Matchers { - if !m.Match(s) { - return false - } - } - - return true -} - -func (self EveryOf) String() string { - return fmt.Sprintf("<every_of:[%s]>", self.Matchers) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/list.go b/ui/wasm/vendor/github.com/gobwas/glob/match/list.go deleted file mode 100644 index 7fd763ecd8e..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/list.go +++ /dev/null @@ -1,49 +0,0 @@ -package match - -import ( - "fmt" - "github.com/gobwas/glob/util/runes" - "unicode/utf8" -) - -type List struct { - List []rune - Not bool -} - -func NewList(list []rune, not bool) List { - return List{list, not} -} - -func (self List) Match(s string) bool { - r, w := utf8.DecodeRuneInString(s) - if len(s) > w { - return false - } - - inList := runes.IndexRune(self.List, r) != -1 - return inList == !self.Not -} - -func (self List) Len() int { - return lenOne -} - -func (self List) Index(s string) (int, []int) { - for i, r := range s { - if self.Not == (runes.IndexRune(self.List, r) == -1) { - return i, segmentsByRuneLength[utf8.RuneLen(r)] - } - } - - return -1, nil -} - -func (self List) String() string { - var not string - if self.Not { - not = "!"
- } - - return fmt.Sprintf("<list:%s[%s]>", not, string(self.List)) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/match.go b/ui/wasm/vendor/github.com/gobwas/glob/match/match.go deleted file mode 100644 index f80e007fb83..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/match.go +++ /dev/null @@ -1,81 +0,0 @@ -package match - -// todo common table of rune's length - -import ( - "fmt" - "strings" -) - -const lenOne = 1 -const lenZero = 0 -const lenNo = -1 - -type Matcher interface { - Match(string) bool - Index(string) (int, []int) - Len() int - String() string -} - -type Matchers []Matcher - -func (m Matchers) String() string { - var s []string - for _, matcher := range m { - s = append(s, fmt.Sprint(matcher)) - } - - return fmt.Sprintf("%s", strings.Join(s, ",")) -} - -// appendMerge merges and sorts given already SORTED and UNIQUE segments. -func appendMerge(target, sub []int) []int { - lt, ls := len(target), len(sub) - out := make([]int, 0, lt+ls) - - for x, y := 0, 0; x < lt || y < ls; { - if x >= lt { - out = append(out, sub[y:]...) - break - } - - if y >= ls { - out = append(out, target[x:]...) - break - } - - xValue := target[x] - yValue := sub[y] - - switch { - - case xValue == yValue: - out = append(out, xValue) - x++ - y++ - - case xValue < yValue: - out = append(out, xValue) - x++ - - case yValue < xValue: - out = append(out, yValue) - y++ - - } - } - - target = append(target[:0], out...) - - return target -} - -func reverseSegments(input []int) { - l := len(input) - m := l / 2 - - for i := 0; i < m; i++ { - input[i], input[l-i-1] = input[l-i-1], input[i] - } -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/max.go b/ui/wasm/vendor/github.com/gobwas/glob/match/max.go deleted file mode 100644 index d72f69efff7..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/max.go +++ /dev/null @@ -1,49 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type Max struct { - Limit int -} - -func NewMax(l int) Max { - return Max{l} -} - -func (self Max) Match(s string) bool { - var l int - for range s { - l += 1 - if l > self.Limit { - return false - } - } - - return true -} - -func (self Max) Index(s string) (int, []int) { - segments := acquireSegments(self.Limit + 1) - segments = append(segments, 0) - var count int - for i, r := range s { - count++ - if count > self.Limit { - break - } - segments = append(segments, i+utf8.RuneLen(r)) - } - - return 0, segments -} - -func (self Max) Len() int { - return lenNo -} - -func (self Max) String() string { - return fmt.Sprintf("<max:%d>", self.Limit) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/min.go b/ui/wasm/vendor/github.com/gobwas/glob/match/min.go deleted file mode 100644 index db57ac8eb49..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/min.go +++ /dev/null @@ -1,57 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type Min struct { - Limit int -} - -func NewMin(l int) Min { - return Min{l} -} - -func (self Min) Match(s string) bool { - var l int - for range s { - l += 1 - if l >= self.Limit { - return true - } - } - - return false -} - -func (self Min) Index(s string) (int, []int) { - var count int - - c := len(s) - self.Limit + 1 - if c <= 0 { - return -1, nil - } - - segments := acquireSegments(c) - for i, r := range s { - count++ - if count >= self.Limit { - segments = append(segments, i+utf8.RuneLen(r)) - } - } - - if len(segments) == 0 { - return -1, nil - } - - return 0, segments -} - -func (self Min) Len() int { - return lenNo -} - -func (self Min) 
String() string { - return fmt.Sprintf("<min:%d>", self.Limit) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/nothing.go b/ui/wasm/vendor/github.com/gobwas/glob/match/nothing.go deleted file mode 100644 index 0d4ecd36b80..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/nothing.go +++ /dev/null @@ -1,27 +0,0 @@ -package match - -import ( - "fmt" -) - -type Nothing struct{} - -func NewNothing() Nothing { - return Nothing{} -} - -func (self Nothing) Match(s string) bool { - return len(s) == 0 -} - -func (self Nothing) Index(s string) (int, []int) { - return 0, segments0 -} - -func (self Nothing) Len() int { - return lenZero -} - -func (self Nothing) String() string { - return fmt.Sprintf("<nothing>") -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix.go b/ui/wasm/vendor/github.com/gobwas/glob/match/prefix.go deleted file mode 100644 index a7347250e8d..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix.go +++ /dev/null @@ -1,50 +0,0 @@ -package match - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type Prefix struct { - Prefix string -} - -func NewPrefix(p string) Prefix { - return Prefix{p} -} - -func (self Prefix) Index(s string) (int, []int) { - idx := strings.Index(s, self.Prefix) - if idx == -1 { - return -1, nil - } - - length := len(self.Prefix) - var sub string - if len(s) > idx+length { - sub = s[idx+length:] - } else { - sub = "" - } - - segments := acquireSegments(len(sub) + 1) - segments = append(segments, length) - for i, r := range sub { - segments = append(segments, length+i+utf8.RuneLen(r)) - } - - return idx, segments -} - -func (self Prefix) Len() int { - return lenNo -} - -func (self Prefix) Match(s string) bool { - return strings.HasPrefix(s, self.Prefix) -} - -func (self Prefix) String() string { - return fmt.Sprintf("<prefix:%s>", self.Prefix) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_any.go b/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_any.go deleted file mode 100644 index 8ee58fe1b3c..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_any.go +++ /dev/null @@ -1,55 +0,0 @@ -package match - -import ( - "fmt" - "strings" - "unicode/utf8" - - sutil "github.com/gobwas/glob/util/strings" -) - -type PrefixAny struct { - Prefix string - Separators []rune -} - -func NewPrefixAny(s string, sep []rune) PrefixAny { - return PrefixAny{s, sep} -} - -func (self PrefixAny) Index(s string) (int, []int) { - idx := strings.Index(s, self.Prefix) - if idx == -1 { - return -1, nil - } - - n := len(self.Prefix) - sub := s[idx+n:] - i := sutil.IndexAnyRunes(sub, self.Separators) - if i > -1 { - sub = sub[:i] - } - - seg := acquireSegments(len(sub) + 1) - seg = append(seg, n) - for i, r := range sub { - seg = append(seg, n+i+utf8.RuneLen(r)) - } - - return idx, seg -} - -func (self PrefixAny) Len() int { - return lenNo -} - -func (self PrefixAny) Match(s string) bool { - if !strings.HasPrefix(s, self.Prefix) { - return false - } - return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 -} - -func (self PrefixAny) String() string { - return fmt.Sprintf("<prefix_any:%s![%s]>", self.Prefix, string(self.Separators)) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_suffix.go deleted file mode 100644 index 8208085a199..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/prefix_suffix.go +++ /dev/null @@ -1,62 +0,0 @@ -package match - -import ( - "fmt" - "strings" -) - -type PrefixSuffix struct { - Prefix, Suffix string -} - 
-func NewPrefixSuffix(p, s string) PrefixSuffix { - return PrefixSuffix{p, s} -} - -func (self PrefixSuffix) Index(s string) (int, []int) { - prefixIdx := strings.Index(s, self.Prefix) - if prefixIdx == -1 { - return -1, nil - } - - suffixLen := len(self.Suffix) - if suffixLen <= 0 { - return prefixIdx, []int{len(s) - prefixIdx} - } - - if (len(s) - prefixIdx) <= 0 { - return -1, nil - } - - segments := acquireSegments(len(s) - prefixIdx) - for sub := s[prefixIdx:]; ; { - suffixIdx := strings.LastIndex(sub, self.Suffix) - if suffixIdx == -1 { - break - } - - segments = append(segments, suffixIdx+suffixLen) - sub = sub[:suffixIdx] - } - - if len(segments) == 0 { - releaseSegments(segments) - return -1, nil - } - - reverseSegments(segments) - - return prefixIdx, segments -} - -func (self PrefixSuffix) Len() int { - return lenNo -} - -func (self PrefixSuffix) Match(s string) bool { - return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) -} - -func (self PrefixSuffix) String() string { - return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/range.go b/ui/wasm/vendor/github.com/gobwas/glob/match/range.go deleted file mode 100644 index ce30245a40b..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/range.go +++ /dev/null @@ -1,48 +0,0 @@ -package match - -import ( - "fmt" - "unicode/utf8" -) - -type Range struct { - Lo, Hi rune - Not bool -} - -func NewRange(lo, hi rune, not bool) Range { - return Range{lo, hi, not} -} - -func (self Range) Len() int { - return lenOne -} - -func (self Range) Match(s string) bool { - r, w := utf8.DecodeRuneInString(s) - if len(s) > w { - return false - } - - inRange := r >= self.Lo && r <= self.Hi - - return inRange == !self.Not -} - -func (self Range) Index(s string) (int, []int) { - for i, r := range s { - if self.Not != (r >= self.Lo && r <= self.Hi) { - return i, segmentsByRuneLength[utf8.RuneLen(r)] - } - } - - return -1, nil -} - -func (self Range) String() string { - var not string - if self.Not { - not = "!"
- } - return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi)) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/row.go b/ui/wasm/vendor/github.com/gobwas/glob/match/row.go deleted file mode 100644 index 4379042e42f..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/row.go +++ /dev/null @@ -1,77 +0,0 @@ -package match - -import ( - "fmt" -) - -type Row struct { - Matchers Matchers - RunesLength int - Segments []int -} - -func NewRow(len int, m ...Matcher) Row { - return Row{ - Matchers: Matchers(m), - RunesLength: len, - Segments: []int{len}, - } -} - -func (self Row) matchAll(s string) bool { - var idx int - for _, m := range self.Matchers { - length := m.Len() - - var next, i int - for next = range s[idx:] { - i++ - if i == length { - break - } - } - - if i < length || !m.Match(s[idx:idx+next+1]) { - return false - } - - idx += next + 1 - } - - return true -} - -func (self Row) lenOk(s string) bool { - var i int - for range s { - i++ - if i > self.RunesLength { - return false - } - } - return self.RunesLength == i -} - -func (self Row) Match(s string) bool { - return self.lenOk(s) && self.matchAll(s) -} - -func (self Row) Len() (l int) { - return self.RunesLength -} - -func (self Row) Index(s string) (int, []int) { - for i := range s { - if len(s[i:]) < self.RunesLength { - break - } - if self.matchAll(s[i:]) { - return i, self.Segments - } - } - return -1, nil -} - -func (self Row) String() string { - return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/segments.go b/ui/wasm/vendor/github.com/gobwas/glob/match/segments.go deleted file mode 100644 index 9ea6f309439..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/segments.go +++ /dev/null @@ -1,91 +0,0 @@ -package match - -import ( - "sync" -) - -type SomePool interface { - Get() []int - Put([]int) -} - -var segmentsPools [1024]sync.Pool - -func toPowerOfTwo(v int) int { - v-- - v |= v >> 1 - v |= v >> 2 - v |= v >> 4 - v |= v >> 8 - v |= v >> 16 - v++ - - return v -} - -const ( - cacheFrom = 16 - cacheToAndHigher = 1024 - cacheFromIndex = 15 - cacheToAndHigherIndex = 1023 -) - -var ( - segments0 = []int{0} - segments1 = []int{1} - segments2 = []int{2} - segments3 = []int{3} - segments4 = []int{4} -) - -var segmentsByRuneLength [5][]int = [5][]int{ - 0: segments0, - 1: segments1, - 2: segments2, - 3: segments3, - 4: segments4, -} - -func init() { - for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { - func(i int) { - segmentsPools[i-1] = sync.Pool{New: func() interface{} { - return make([]int, 0, i) - }} - }(i) - } -} - -func getTableIndex(c int) int { - p := toPowerOfTwo(c) - switch { - case p >= cacheToAndHigher: - return cacheToAndHigherIndex - case p <= cacheFrom: - return cacheFromIndex - default: - return p - 1 - } -} - -func acquireSegments(c int) []int { - // make []int with less capacity than cacheFrom - // is faster than acquiring it from pool - if c < cacheFrom { - return make([]int, 0, c) - } - - return segmentsPools[getTableIndex(c)].Get().([]int)[:0] -} - -func releaseSegments(s []int) { - c := cap(s) - - // make []int with less capacity than cacheFrom - // is faster than acquiring it from pool - if c < cacheFrom { - return - } - - segmentsPools[getTableIndex(c)].Put(s) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/single.go b/ui/wasm/vendor/github.com/gobwas/glob/match/single.go deleted file mode 100644 index ee6e3954c1f..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/single.go +++ /dev/null
@@ -1,43 +0,0 @@ -package match - -import ( - "fmt" - "github.com/gobwas/glob/util/runes" - "unicode/utf8" -) - -// single represents ? -type Single struct { - Separators []rune -} - -func NewSingle(s []rune) Single { - return Single{s} -} - -func (self Single) Match(s string) bool { - r, w := utf8.DecodeRuneInString(s) - if len(s) > w { - return false - } - - return runes.IndexRune(self.Separators, r) == -1 -} - -func (self Single) Len() int { - return lenOne -} - -func (self Single) Index(s string) (int, []int) { - for i, r := range s { - if runes.IndexRune(self.Separators, r) == -1 { - return i, segmentsByRuneLength[utf8.RuneLen(r)] - } - } - - return -1, nil -} - -func (self Single) String() string { - return fmt.Sprintf("<single:![%s]>", string(self.Separators)) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/suffix.go b/ui/wasm/vendor/github.com/gobwas/glob/match/suffix.go deleted file mode 100644 index 85bea8c68ec..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/suffix.go +++ /dev/null @@ -1,35 +0,0 @@ -package match - -import ( - "fmt" - "strings" -) - -type Suffix struct { - Suffix string -} - -func NewSuffix(s string) Suffix { - return Suffix{s} -} - -func (self Suffix) Len() int { - return lenNo -} - -func (self Suffix) Match(s string) bool { - return strings.HasSuffix(s, self.Suffix) -} - -func (self Suffix) Index(s string) (int, []int) { - idx := strings.Index(s, self.Suffix) - if idx == -1 { - return -1, nil - } - - return 0, []int{idx + len(self.Suffix)} -} - -func (self Suffix) String() string { - return fmt.Sprintf("<suffix:%s>", self.Suffix) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/suffix_any.go b/ui/wasm/vendor/github.com/gobwas/glob/match/suffix_any.go deleted file mode 100644 index c5106f8196c..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/suffix_any.go +++ /dev/null @@ -1,43 +0,0 @@ -package match - -import ( - "fmt" - "strings" - - sutil "github.com/gobwas/glob/util/strings" -) - -type SuffixAny struct { - Suffix string - Separators []rune -} - -func NewSuffixAny(s string, sep []rune) SuffixAny { - return SuffixAny{s, sep} -} - -func (self SuffixAny) Index(s string) (int, []int) { - idx := strings.Index(s, self.Suffix) - if idx == -1 { - return -1, nil - } - - i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 - - return i, []int{idx + len(self.Suffix) - i} -} - -func (self SuffixAny) Len() int { - return lenNo -} - -func (self SuffixAny) Match(s string) bool { - if !strings.HasSuffix(s, self.Suffix) { - return false - } - return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 -} - -func (self SuffixAny) String() string { - return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/super.go b/ui/wasm/vendor/github.com/gobwas/glob/match/super.go deleted file mode 100644 index 3875950bb8c..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/super.go +++ /dev/null @@ -1,33 +0,0 @@ -package match - -import ( - "fmt" -) - -type Super struct{} - -func NewSuper() Super { - return Super{} -} - -func (self Super) Match(s string) bool { - return true -} - -func (self Super) Len() int { - return lenNo -} - -func (self Super) Index(s string) (int, []int) { - segments := acquireSegments(len(s) + 1) - for i := range s { - segments = append(segments, i) - } - segments = append(segments, len(s)) - - return 0, segments -} - -func (self Super) String() string { - return fmt.Sprintf("<super>") -}
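All the matchers deleted above implement the `Matcher` contract from match.go: `Index` returns the offset where a match can start, plus a segments slice of the admissible match lengths from that offset. A small illustration of that contract, with a made-up input; the package is public, so this compiles against the pre-removal vendored code:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/match"
)

func main() {
	// Prefix matches any string beginning with "ab". Index reports where a
	// candidate match starts (offset 2 here) and every admissible match
	// length from that offset: "ab", "abc", and "abcd" all carry the prefix.
	p := match.NewPrefix("ab")
	idx, segments := p.Index("xxabcd")
	fmt.Println(idx, segments) // 2 [2 3 4]
}
```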
diff --git a/ui/wasm/vendor/github.com/gobwas/glob/match/text.go b/ui/wasm/vendor/github.com/gobwas/glob/match/text.go deleted file mode 100644 index 0a17616d3cb..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/match/text.go +++ /dev/null @@ -1,45 +0,0 @@ -package match - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -// raw represents raw string to match -type Text struct { - Str string - RunesLength int - BytesLength int - Segments []int -} - -func NewText(s string) Text { - return Text{ - Str: s, - RunesLength: utf8.RuneCountInString(s), - BytesLength: len(s), - Segments: []int{len(s)}, - } -} - -func (self Text) Match(s string) bool { - return self.Str == s -} - -func (self Text) Len() int { - return self.RunesLength -} - -func (self Text) Index(s string) (int, []int) { - index := strings.Index(s, self.Str) - if index == -1 { - return -1, nil - } - - return index, self.Segments -} - -func (self Text) String() string { - return fmt.Sprintf("<text:`%s`>", self.Str) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/readme.md b/ui/wasm/vendor/github.com/gobwas/glob/readme.md deleted file mode 100644 index f58144e733e..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/readme.md +++ /dev/null @@ -1,148 +0,0 @@ -# glob.[go](https://golang.org) - -[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] - -> Go Globbing Library. - -## Install - -```shell - go get github.com/gobwas/glob -``` - -## Example - -```go - -package main - -import "github.com/gobwas/glob" - -func main() { - var g glob.Glob - - // create simple glob - g = glob.MustCompile("*.github.com") - g.Match("api.github.com") // true - - // quote meta characters and then create simple glob - g = glob.MustCompile(glob.QuoteMeta("*.github.com")) - g.Match("*.github.com") // true - - // create new glob with set of delimiters as ["."] - g = glob.MustCompile("api.*.com", '.') - g.Match("api.github.com") // true - g.Match("api.gi.hub.com") // false - - // create new glob with set of delimiters as ["."] - // but now with super wildcard - g = glob.MustCompile("api.**.com", '.') - g.Match("api.github.com") // true - g.Match("api.gi.hub.com") // true - - // create glob with single symbol wildcard - g = glob.MustCompile("?at") - g.Match("cat") // true - g.Match("fat") // true - g.Match("at") // false - - // create glob with single symbol wildcard and delimiters ['f'] - g = glob.MustCompile("?at", 'f') - g.Match("cat") // true - g.Match("fat") // false - g.Match("at") // false - - // create glob with character-list matchers - g = glob.MustCompile("[abc]at") - g.Match("cat") // true - g.Match("bat") // true - g.Match("fat") // false - g.Match("at") // false - - // create glob with character-list matchers - g = glob.MustCompile("[!abc]at") - g.Match("cat") // false - g.Match("bat") // false - g.Match("fat") // true - g.Match("at") // false - - // create glob with character-range matchers - g = glob.MustCompile("[a-c]at") - g.Match("cat") // true - g.Match("bat") // true - g.Match("fat") // false - g.Match("at") // false - - // create glob with character-range matchers - g = glob.MustCompile("[!a-c]at") - g.Match("cat") // false - g.Match("bat") // false - g.Match("fat") // true - g.Match("at") // false - - // create glob with pattern-alternatives list - g = glob.MustCompile("{cat,bat,[fr]at}") - g.Match("cat") // true - g.Match("bat") // true - g.Match("fat") // true - g.Match("rat") // true - g.Match("at") // false - g.Match("zat") // false -} - -``` - -## Performance - -This library is created for compile-once 
patterns. This means that compilation may take some time, but -matching strings against a compiled pattern is much faster than re-parsing the pattern on every call. - -If you do not reuse the compiled `glob.Glob` object, and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower. - -Run `go test -bench=.` from the source root to see the benchmarks: - -Pattern | Fixture | Match | Speed (ns/op) ---------|---------|-------|-------------- -`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 -`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 -`https://*.google.*` | `https://account.google.com` | `true` | 96 -`https://*.google.*` | `https://google.com` | `false` | 66 -`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 -`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 -`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 -`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 -`abc*` | `abcdef` | `true` | 8.15 -`abc*` | `af` | `false` | 5.68 -`*def` | `abcdef` | `true` | 8.84 -`*def` | `af` | `false` | 5.74 -`ab*ef` | `abcdef` | `true` | 15.2 -`ab*ef` | `af` | `false` | 10.4 - -The same cases with the `regexp` package: - -Pattern | Fixture | Match | Speed (ns/op) ---------|---------|-------|-------------- -`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 -`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 -`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 -`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 -`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 -`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 -`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 -`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 -`^abc.*$` | `abcdef` | `true` | 237 -`^abc.*$` | `af` | `false` | 100 -`^.*def$` | `abcdef` | `true` | 464 -`^.*def$` | `af` | `false` | 265 -`^ab.*ef$` | `abcdef` | `true` | 375 -`^ab.*ef$` | `af` | `false` | 145 - -[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg -[godoc-url]: https://godoc.org/github.com/gobwas/glob -[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master -[travis-url]: https://travis-ci.org/gobwas/glob - -## Syntax - -Syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), -except that `**` is a "super-asterisk" that is not sensitive to separators.
\ No newline at end of file diff --git a/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/ast.go deleted file mode 100644 index 3220a694a9c..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/ast.go +++ /dev/null @@ -1,122 +0,0 @@ -package ast - -import ( - "bytes" - "fmt" -) - -type Node struct { - Parent *Node - Children []*Node - Value interface{} - Kind Kind -} - -func NewNode(k Kind, v interface{}, ch ...*Node) *Node { - n := &Node{ - Kind: k, - Value: v, - } - for _, c := range ch { - Insert(n, c) - } - return n -} - -func (a *Node) Equal(b *Node) bool { - if a.Kind != b.Kind { - return false - } - if a.Value != b.Value { - return false - } - if len(a.Children) != len(b.Children) { - return false - } - for i, c := range a.Children { - if !c.Equal(b.Children[i]) { - return false - } - } - return true -} - -func (a *Node) String() string { - var buf bytes.Buffer - buf.WriteString(a.Kind.String()) - if a.Value != nil { - buf.WriteString(" =") - buf.WriteString(fmt.Sprintf("%v", a.Value)) - } - if len(a.Children) > 0 { - buf.WriteString(" [") - for i, c := range a.Children { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(c.String()) - } - buf.WriteString("]") - } - return buf.String() -} - -func Insert(parent *Node, children ...*Node) { - parent.Children = append(parent.Children, children...) - for _, ch := range children { - ch.Parent = parent - } -} - -type List struct { - Not bool - Chars string -} - -type Range struct { - Not bool - Lo, Hi rune -} - -type Text struct { - Text string -} - -type Kind int - -const ( - KindNothing Kind = iota - KindPattern - KindList - KindRange - KindText - KindAny - KindSuper - KindSingle - KindAnyOf -) - -func (k Kind) String() string { - switch k { - case KindNothing: - return "Nothing" - case KindPattern: - return "Pattern" - case KindList: - return "List" - case KindRange: - return "Range" - case KindText: - return "Text" - case KindAny: - return "Any" - case KindSuper: - return "Super" - case KindSingle: - return "Single" - case KindAnyOf: - return "AnyOf" - default: - return "" - } -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/parser.go deleted file mode 100644 index 429b4094303..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/syntax/ast/parser.go +++ /dev/null @@ -1,157 +0,0 @@ -package ast - -import ( - "errors" - "fmt" - "github.com/gobwas/glob/syntax/lexer" - "unicode/utf8" -) - -type Lexer interface { - Next() lexer.Token -} - -type parseFn func(*Node, Lexer) (parseFn, *Node, error) - -func Parse(lexer Lexer) (*Node, error) { - var parser parseFn - - root := NewNode(KindPattern, nil) - - var ( - tree *Node - err error - ) - for parser, tree = parserMain, root; parser != nil; { - parser, tree, err = parser(tree, lexer) - if err != nil { - return nil, err - } - } - - return root, nil -} - -func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { - for { - token := lex.Next() - switch token.Type { - case lexer.EOF: - return nil, tree, nil - - case lexer.Error: - return nil, tree, errors.New(token.Raw) - - case lexer.Text: - Insert(tree, NewNode(KindText, Text{token.Raw})) - return parserMain, tree, nil - - case lexer.Any: - Insert(tree, NewNode(KindAny, nil)) - return parserMain, tree, nil - - case lexer.Super: - Insert(tree, NewNode(KindSuper, nil)) - return parserMain, tree, nil - - case lexer.Single: - Insert(tree, NewNode(KindSingle, nil)) - return parserMain, 
tree, nil - - case lexer.RangeOpen: - return parserRange, tree, nil - - case lexer.TermsOpen: - a := NewNode(KindAnyOf, nil) - Insert(tree, a) - - p := NewNode(KindPattern, nil) - Insert(a, p) - - return parserMain, p, nil - - case lexer.Separator: - p := NewNode(KindPattern, nil) - Insert(tree.Parent, p) - - return parserMain, p, nil - - case lexer.TermsClose: - return parserMain, tree.Parent.Parent, nil - - default: - return nil, tree, fmt.Errorf("unexpected token: %s", token) - } - } - return nil, tree, fmt.Errorf("unknown error") -} - -func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { - var ( - not bool - lo rune - hi rune - chars string - ) - for { - token := lex.Next() - switch token.Type { - case lexer.EOF: - return nil, tree, errors.New("unexpected end") - - case lexer.Error: - return nil, tree, errors.New(token.Raw) - - case lexer.Not: - not = true - - case lexer.RangeLo: - r, w := utf8.DecodeRuneInString(token.Raw) - if len(token.Raw) > w { - return nil, tree, fmt.Errorf("unexpected length of lo character") - } - lo = r - - case lexer.RangeBetween: - // - - case lexer.RangeHi: - r, w := utf8.DecodeRuneInString(token.Raw) - if len(token.Raw) > w { - return nil, tree, fmt.Errorf("unexpected length of hi character") - } - - hi = r - - if hi < lo { - return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) - } - - case lexer.Text: - chars = token.Raw - - case lexer.RangeClose: - isRange := lo != 0 && hi != 0 - isChars := chars != "" - - if isChars == isRange { - return nil, tree, fmt.Errorf("could not parse range") - } - - if isRange { - Insert(tree, NewNode(KindRange, Range{ - Lo: lo, - Hi: hi, - Not: not, - })) - } else { - Insert(tree, NewNode(KindList, List{ - Chars: chars, - Not: not, - })) - } - - return parserMain, tree, nil - } - } -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go deleted file mode 100644 index a1c8d1962a0..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go +++ /dev/null @@ -1,273 +0,0 @@ -package lexer - -import ( - "bytes" - "fmt" - "github.com/gobwas/glob/util/runes" - "unicode/utf8" -) - -const ( - char_any = '*' - char_comma = ',' - char_single = '?' - char_escape = '\\' - char_range_open = '[' - char_range_close = ']' - char_terms_open = '{' - char_terms_close = '}' - char_range_not = '!' 
- char_range_between = '-' -) - -var specials = []byte{ - char_any, - char_single, - char_escape, - char_range_open, - char_range_close, - char_terms_open, - char_terms_close, -} - -func Special(c byte) bool { - return bytes.IndexByte(specials, c) != -1 -} - -type tokens []Token - -func (i *tokens) shift() (ret Token) { - ret = (*i)[0] - copy(*i, (*i)[1:]) - *i = (*i)[:len(*i)-1] - return -} - -func (i *tokens) push(v Token) { - *i = append(*i, v) -} - -func (i *tokens) empty() bool { - return len(*i) == 0 -} - -var eof rune = 0 - -type lexer struct { - data string - pos int - err error - - tokens tokens - termsLevel int - - lastRune rune - lastRuneSize int - hasRune bool -} - -func NewLexer(source string) *lexer { - l := &lexer{ - data: source, - tokens: tokens(make([]Token, 0, 4)), - } - return l -} - -func (l *lexer) Next() Token { - if l.err != nil { - return Token{Error, l.err.Error()} - } - if !l.tokens.empty() { - return l.tokens.shift() - } - - l.fetchItem() - return l.Next() -} - -func (l *lexer) peek() (r rune, w int) { - if l.pos == len(l.data) { - return eof, 0 - } - - r, w = utf8.DecodeRuneInString(l.data[l.pos:]) - if r == utf8.RuneError { - l.errorf("could not read rune") - r = eof - w = 0 - } - - return -} - -func (l *lexer) read() rune { - if l.hasRune { - l.hasRune = false - l.seek(l.lastRuneSize) - return l.lastRune - } - - r, s := l.peek() - l.seek(s) - - l.lastRune = r - l.lastRuneSize = s - - return r -} - -func (l *lexer) seek(w int) { - l.pos += w -} - -func (l *lexer) unread() { - if l.hasRune { - l.errorf("could not unread rune") - return - } - l.seek(-l.lastRuneSize) - l.hasRune = true -} - -func (l *lexer) errorf(f string, v ...interface{}) { - l.err = fmt.Errorf(f, v...) -} - -func (l *lexer) inTerms() bool { - return l.termsLevel > 0 -} - -func (l *lexer) termsEnter() { - l.termsLevel++ -} - -func (l *lexer) termsLeave() { - l.termsLevel-- -} - -var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} -var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) - -func (l *lexer) fetchItem() { - r := l.read() - switch { - case r == eof: - l.tokens.push(Token{EOF, ""}) - - case r == char_terms_open: - l.termsEnter() - l.tokens.push(Token{TermsOpen, string(r)}) - - case r == char_comma && l.inTerms(): - l.tokens.push(Token{Separator, string(r)}) - - case r == char_terms_close && l.inTerms(): - l.tokens.push(Token{TermsClose, string(r)}) - l.termsLeave() - - case r == char_range_open: - l.tokens.push(Token{RangeOpen, string(r)}) - l.fetchRange() - - case r == char_single: - l.tokens.push(Token{Single, string(r)}) - - case r == char_any: - if l.read() == char_any { - l.tokens.push(Token{Super, string(r) + string(r)}) - } else { - l.unread() - l.tokens.push(Token{Any, string(r)}) - } - - default: - l.unread() - - var breakers []rune - if l.inTerms() { - breakers = inTermsBreakers - } else { - breakers = inTextBreakers - } - l.fetchText(breakers) - } -} - -func (l *lexer) fetchRange() { - var wantHi bool - var wantClose bool - var seenNot bool - for { - r := l.read() - if r == eof { - l.errorf("unexpected end of input") - return - } - - if wantClose { - if r != char_range_close { - l.errorf("expected close range character") - } else { - l.tokens.push(Token{RangeClose, string(r)}) - } - return - } - - if wantHi { - l.tokens.push(Token{RangeHi, string(r)}) - wantClose = true - continue - } - - if !seenNot && r == char_range_not { - l.tokens.push(Token{Not, string(r)}) - seenNot = true - continue - } - - if n, w := l.peek(); n 
== char_range_between { - l.seek(w) - l.tokens.push(Token{RangeLo, string(r)}) - l.tokens.push(Token{RangeBetween, string(n)}) - wantHi = true - continue - } - - l.unread() // unread first peek and fetch as text - l.fetchText([]rune{char_range_close}) - wantClose = true - } -} - -func (l *lexer) fetchText(breakers []rune) { - var data []rune - var escaped bool - -reading: - for { - r := l.read() - if r == eof { - break - } - - if !escaped { - if r == char_escape { - escaped = true - continue - } - - if runes.IndexRune(breakers, r) != -1 { - l.unread() - break reading - } - } - - escaped = false - data = append(data, r) - } - - if len(data) > 0 { - l.tokens.push(Token{Text, string(data)}) - } -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/token.go deleted file mode 100644 index 2797c4e83a4..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/syntax/lexer/token.go +++ /dev/null @@ -1,88 +0,0 @@ -package lexer - -import "fmt" - -type TokenType int - -const ( - EOF TokenType = iota - Error - Text - Char - Any - Super - Single - Not - Separator - RangeOpen - RangeClose - RangeLo - RangeHi - RangeBetween - TermsOpen - TermsClose -) - -func (tt TokenType) String() string { - switch tt { - case EOF: - return "eof" - - case Error: - return "error" - - case Text: - return "text" - - case Char: - return "char" - - case Any: - return "any" - - case Super: - return "super" - - case Single: - return "single" - - case Not: - return "not" - - case Separator: - return "separator" - - case RangeOpen: - return "range_open" - - case RangeClose: - return "range_close" - - case RangeLo: - return "range_lo" - - case RangeHi: - return "range_hi" - - case RangeBetween: - return "range_between" - - case TermsOpen: - return "terms_open" - - case TermsClose: - return "terms_close" - - default: - return "undef" - } -} - -type Token struct { - Type TokenType - Raw string -} - -func (t Token) String() string { - return fmt.Sprintf("%v<%q>", t.Type, t.Raw) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/syntax/syntax.go b/ui/wasm/vendor/github.com/gobwas/glob/syntax/syntax.go deleted file mode 100644 index 1d168b14829..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/syntax/syntax.go +++ /dev/null @@ -1,14 +0,0 @@ -package syntax - -import ( - "github.com/gobwas/glob/syntax/ast" - "github.com/gobwas/glob/syntax/lexer" -) - -func Parse(s string) (*ast.Node, error) { - return ast.Parse(lexer.NewLexer(s)) -} - -func Special(b byte) bool { - return lexer.Special(b) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/util/runes/runes.go b/ui/wasm/vendor/github.com/gobwas/glob/util/runes/runes.go deleted file mode 100644 index a7235564107..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/util/runes/runes.go +++ /dev/null @@ -1,154 +0,0 @@ -package runes - -func Index(s, needle []rune) int { - ls, ln := len(s), len(needle) - - switch { - case ln == 0: - return 0 - case ln == 1: - return IndexRune(s, needle[0]) - case ln == ls: - if Equal(s, needle) { - return 0 - } - return -1 - case ln > ls: - return -1 - } - -head: - for i := 0; i < ls && ls-i >= ln; i++ { - for y := 0; y < ln; y++ { - if s[i+y] != needle[y] { - continue head - } - } - - return i - } - - return -1 -} - -func LastIndex(s, needle []rune) int { - ls, ln := len(s), len(needle) - - switch { - case ln == 0: - if ls == 0 { - return 0 - } - return ls - case ln == 1: - return IndexLastRune(s, needle[0]) - case ln == ls: - if Equal(s, needle) { - return 0 - } - 
return -1 - case ln > ls: - return -1 - } - -head: - for i := ls - 1; i >= 0 && i >= ln; i-- { - for y := ln - 1; y >= 0; y-- { - if s[i-(ln-y-1)] != needle[y] { - continue head - } - } - - return i - ln + 1 - } - - return -1 -} - -// IndexAny returns the index of the first instance of any Unicode code point -// from chars in s, or -1 if no Unicode code point from chars is present in s. -func IndexAny(s, chars []rune) int { - if len(chars) > 0 { - for i, c := range s { - for _, m := range chars { - if c == m { - return i - } - } - } - } - return -1 -} - -func Contains(s, needle []rune) bool { - return Index(s, needle) >= 0 -} - -func Max(s []rune) (max rune) { - for _, r := range s { - if r > max { - max = r - } - } - - return -} - -func Min(s []rune) rune { - min := rune(-1) - for _, r := range s { - if min == -1 { - min = r - continue - } - - if r < min { - min = r - } - } - - return min -} - -func IndexRune(s []rune, r rune) int { - for i, c := range s { - if c == r { - return i - } - } - return -1 -} - -func IndexLastRune(s []rune, r rune) int { - for i := len(s) - 1; i >= 0; i-- { - if s[i] == r { - return i - } - } - - return -1 -} - -func Equal(a, b []rune) bool { - if len(a) == len(b) { - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - - return true - } - - return false -} - -// HasPrefix tests whether the string s begins with prefix. -func HasPrefix(s, prefix []rune) bool { - return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) -} - -// HasSuffix tests whether the string s ends with suffix. -func HasSuffix(s, suffix []rune) bool { - return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) -} diff --git a/ui/wasm/vendor/github.com/gobwas/glob/util/strings/strings.go b/ui/wasm/vendor/github.com/gobwas/glob/util/strings/strings.go deleted file mode 100644 index e8ee1920b17..00000000000 --- a/ui/wasm/vendor/github.com/gobwas/glob/util/strings/strings.go +++ /dev/null @@ -1,39 +0,0 @@ -package strings - -import ( - "strings" - "unicode/utf8" -) - -func IndexAnyRunes(s string, rs []rune) int { - for _, r := range rs { - if i := strings.IndexRune(s, r); i != -1 { - return i - } - } - - return -1 -} - -func LastIndexAnyRunes(s string, rs []rune) int { - for _, r := range rs { - i := -1 - if 0 <= r && r < utf8.RuneSelf { - i = strings.LastIndexByte(s, byte(r)) - } else { - sub := s - for len(sub) > 0 { - j := strings.IndexRune(s, r) - if j == -1 { - break - } - i = j - sub = sub[i+1:] - } - } - if i != -1 { - return i - } - } - return -1 -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/AUTHORS b/ui/wasm/vendor/github.com/gogo/protobuf/AUTHORS deleted file mode 100644 index 3d97fc7a29f..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of GoGo authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS file, which -# lists people. For example, employees are listed in CONTRIBUTORS, -# but not in AUTHORS, because the employer holds the copyright. - -# Names should be added to this file as one of -# Organization's name -# Individual's name -# Individual's name - -# Please keep the list sorted. 
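Note: the `gobwas/glob` lexer removed above is the tokenizer behind glob parsing: `NewLexer` wraps a pattern string and `Next` yields one `Token` per call until `EOF` (or `Error`). A minimal sketch of that API as it appears in the deleted source; only the vendored copy is going away, the package itself still lives upstream at github.com/gobwas/glob:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob/syntax/lexer"
)

func main() {
	// Tokenize a pattern containing a terms group, a wildcard, and literal text.
	l := lexer.NewLexer("{foo,bar}*.go")
	for {
		tok := l.Next()
		// Token.String() prints type<"raw">, e.g.:
		// terms_open<"{">, text<"foo">, separator<",">, text<"bar">,
		// terms_close<"}">, any<"*">, text<".go">, eof<"">
		fmt.Println(tok)
		if tok.Type == lexer.EOF || tok.Type == lexer.Error {
			break
		}
	}
}
```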
- -Sendgrid, Inc -Vastech SA (PTY) LTD -Walter Schulze diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/ui/wasm/vendor/github.com/gogo/protobuf/CONTRIBUTORS deleted file mode 100644 index 1b4f6c208a1..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,23 +0,0 @@ -Anton Povarov -Brian Goff -Clayton Coleman -Denis Smirnov -DongYun Kang -Dwayne Schultz -Georg Apitz -Gustav Paul -Johan Brandhorst -John Shahid -John Tuley -Laurent -Patrick Lee -Peter Edge -Roger Johansson -Sam Nguyen -Sergio Arbeo -Stephen J Day -Tamir Duberstein -Todd Eisenberger -Tormod Erevik Lea -Vyacheslav Kim -Walter Schulze diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/LICENSE b/ui/wasm/vendor/github.com/gogo/protobuf/LICENSE deleted file mode 100644 index f57de90da8a..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/LICENSE +++ /dev/null @@ -1,35 +0,0 @@ -Copyright (c) 2013, The GoGo Authors. All rights reserved. - -Protocol Buffers for Go with Gadgets - -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/Makefile b/ui/wasm/vendor/github.com/gogo/protobuf/proto/Makefile deleted file mode 100644 index 00d65f32773..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. 
-# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C test_proto - make -C proto3_proto - make diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/clone.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/clone.go deleted file mode 100644 index a26b046d94f..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/clone.go +++ /dev/null @@ -1,258 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. 
-func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. -func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { - emOut := out.Addr().Interface().(extensionsBytes) - bIn := emIn.GetExtensions() - bOut := emOut.GetExtensions() - *bOut = append(*bOut, *bIn...) - } else if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. 
- if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. - // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/custom_gogo.go deleted file mode 100644 index 24552483c6c..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/custom_gogo.go +++ /dev/null @@ -1,39 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
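Note on the `clone.go` removal above: `Merge` copies set scalar fields from src to dst and appends repeated fields, and `Clone` is just `reflect.New` plus `Merge`. A minimal sketch of the observable behavior, assuming the upstream module is still on the import path; `Item` is a hypothetical hand-written stand-in for a protoc-generated message, here only to keep the sketch self-contained:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Item is a hand-written stand-in for generated code (hypothetical);
// the protobuf struct tags are what Merge's reflection path reads.
type Item struct {
	Name *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Tags []string `protobuf:"bytes,2,rep,name=tags" json:"tags,omitempty"`
}

func (m *Item) Reset()         { *m = Item{} }
func (m *Item) String() string { return proto.CompactTextString(m) }
func (*Item) ProtoMessage()    {}

func main() {
	src := &Item{Name: proto.String("a"), Tags: []string{"x"}}
	dst := &Item{Tags: []string{"y"}}

	proto.Merge(dst, src)            // set scalars copied, repeated appended
	fmt.Println(*dst.Name, dst.Tags) // a [y x]

	clone := proto.Clone(src).(*Item) // deep copy: New + Merge
	*clone.Name = "b"
	fmt.Println(*src.Name, *clone.Name) // a b — src is untouched
}
```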
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "reflect" - -type custom interface { - Marshal() ([]byte, error) - Unmarshal(data []byte) error - Size() int -} - -var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/decode.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08bef2..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. 
-// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. 
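Note: the varint format decoded above packs an integer 7 bits per byte, least-significant group first, with the high bit of each byte as a continuation flag. The package-level helpers being deleted here (`DecodeVarint` above, `EncodeVarint` in encode.go further down) round-trip it:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	buf := proto.EncodeVarint(300) // 300 = 0b10_0101100 -> bytes 0xac 0x02
	fmt.Printf("% x\n", buf)       // ac 02

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2 (value and bytes consumed)
}
```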
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. -func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. 
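Note: zigzag encoding (the `DecodeZigzag64`/`DecodeZigzag32` pair above) maps signed integers to unsigned ones so that small magnitudes of either sign stay short varints: 0→0, -1→1, 1→2, -2→3. A standalone sketch of the mapping, algebraically equivalent to the removed `(x >> 1) ^ uint64((int64(x&1)<<63)>>63)`:

```go
package main

import "fmt"

// zigzag64 spreads the sign bit with an arithmetic right shift;
// unzigzag64 inverts it.
func zigzag64(v int64) uint64   { return uint64(v<<1) ^ uint64(v>>63) }
func unzigzag64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		u := zigzag64(v)
		fmt.Println(v, "->", u, "->", unzigzag64(u)) // 0 1 2 3 4, round-tripped
	}
}
```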
-func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/deprecated.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c09aa..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
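Note: per the doc comments above, `Unmarshal` resets the target before decoding while `UnmarshalMerge` preserves existing data and appends to repeated fields. A sketch reusing the hand-written `Item` stand-in from the Clone/Merge note earlier (same caveat: hand-written messages take the slow reflection path the code above mentions):

```go
// demoUnmarshal continues the earlier Item sketch; proto is
// github.com/gogo/protobuf/proto.
func demoUnmarshal() {
	src := &Item{Tags: []string{"x"}}
	raw, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	dst := &Item{Tags: []string{"y"}}
	_ = proto.Unmarshal(raw, dst)      // resets dst first: Tags == [x]
	_ = proto.UnmarshalMerge(raw, dst) // no reset, merges:  Tags == [x x]
	fmt.Println(dst.Tags)
}
```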
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/discard.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/discard.go deleted file mode 100644 index fe1bd7d904e..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - default: // E.g., *pb.T - discardInfo := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - discardInfo.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration.go deleted file mode 100644 index 93464c91cff..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration.go +++ /dev/null @@ -1,100 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
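Note: `DiscardUnknown` (discard.go above) drops the unknown-field bytes that unmarshaling otherwise preserves for re-marshaling; for messages without a generated `XXX_DiscardUnknown` it falls back to `discardLegacy`, which clears the `XXX_unrecognized` slice by reflection. A sketch with a hypothetical hand-written message:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Legacy is a hand-written stand-in (hypothetical); generated messages
// carry the same XXX_unrecognized field for unknown-field bytes.
type Legacy struct {
	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *Legacy) Reset()         { *m = Legacy{} }
func (m *Legacy) String() string { return proto.CompactTextString(m) }
func (*Legacy) ProtoMessage()    {}

func main() {
	// Pretend unmarshaling preserved an unknown field 2 (varint 1).
	m := &Legacy{Name: proto.String("a"), XXX_unrecognized: []byte{0x10, 0x01}}
	proto.DiscardUnknown(m)
	fmt.Println(m.XXX_unrecognized == nil) // true
}
```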
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Range of a Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid Duration -// may still be too large to fit into a time.Duration (the range of Duration -// is about 10,000 years, and the range of time.Duration is about 290). -func validateDuration(d *duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %#v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %#v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) - } - return nil -} - -// DurationFromProto converts a Duration to a time.Duration. DurationFromProto -// returns an error if the Duration is invalid or is too large to be -// represented in a time.Duration. -func durationFromProto(p *duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a Duration. -func durationProto(d time.Duration) *duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration_gogo.go deleted file mode 100644 index e748e1730e1..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ /dev/null @@ -1,49 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
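Note: duration.go above converts between `google.protobuf.Duration` (seconds + nanos, roughly a 10,000-year range) and `time.Duration` (roughly 290 years), so overflow must be checked on the way in. A trimmed standalone sketch of that conversion; the removed code additionally validates the range and that seconds and nanos agree in sign before converting:

```go
package main

import (
	"fmt"
	"time"
)

// fromProto mirrors the removed durationFromProto: seconds+nanos into a
// time.Duration, guarding against int64-nanosecond overflow.
func fromProto(seconds int64, nanos int32) (time.Duration, error) {
	d := time.Duration(seconds) * time.Second
	if int64(d/time.Second) != seconds {
		return 0, fmt.Errorf("duration: %d seconds out of time.Duration range", seconds)
	}
	if nanos != 0 {
		d += time.Duration(nanos)
		if (d < 0) != (nanos < 0) {
			return 0, fmt.Errorf("duration: overflow adding %d nanos", nanos)
		}
	}
	return d, nil
}

func main() {
	d, err := fromProto(90, 500000000)
	fmt.Println(d, err) // 1m30.5s <nil>
}
```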
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - -type duration struct { - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (m *duration) Reset() { *m = duration{} } -func (*duration) ProtoMessage() {} -func (*duration) String() string { return "duration" } - -func init() { - RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode.go deleted file mode 100644 index 9581ccd3042..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode.go +++ /dev/null @@ -1,205 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. 
-// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - sizVar := SizeVarint(uint64(siz)) - p.grow(siz + sizVar) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode_gogo.go deleted file mode 100644 index 0f5fb173e9f..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ /dev/null @@ -1,33 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
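// The varint and zigzag encoders deleted above are small enough to restate.
// A standalone sketch of both, for reference while reviewing this hunk; it is
// a re-implementation for illustration, not code from this vendor tree.
package main

import "fmt"

// putVarint appends x in base-128 varint form: 7 payload bits per byte, with
// the high bit set on every byte except the last (the same loop shape as
// Buffer.EncodeVarint above).
func putVarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

// zigzag64 is the sint64 mapping used by EncodeZigzag64: it interleaves
// negative and positive values (0, -1, 1, -2, ... -> 0, 1, 2, 3, ...) so
// small negative numbers still encode to short varints.
func zigzag64(v int64) uint64 {
	return uint64((v << 1) ^ (v >> 63)) // arithmetic shift copies the sign bit
}

func main() {
	fmt.Printf("% x\n", putVarint(nil, 300)) // ac 02
	fmt.Println(zigzag64(-1), zigzag64(1))   // 1 2
}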
- -package proto - -func NewRequiredNotSetError(field string) *RequiredNotSetError { - return &RequiredNotSetError{field} -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/equal.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/equal.go deleted file mode 100644 index d4db5a1c145..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/equal.go +++ /dev/null @@ -1,300 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. 
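// Two of the Equal rules listed above can be demonstrated without protobuf at
// all: NaN never compares equal to anything (so a NaN field makes two
// otherwise-identical messages unequal), and for proto3 "bytes" fields a nil
// slice equals an empty one. Standalone illustration, not vendor code.
package main

import (
	"bytes"
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	fmt.Println(nan == nan)                 // false: NaN != x for all x, including NaN
	fmt.Println(bytes.Equal(nil, []byte{})) // true: zero-length bytes, nil == {}
}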
-*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. -func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. 
- if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. -func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1, m2 := e1.value, e2.value - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions.go deleted file mode 100644 index 341c6f57f52..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions.go +++ /dev/null @@ -1,605 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - case extensionsBytes: - return slowExtensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. 
- return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - value interface{} - enc []byte -} - -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - if ebase, ok := base.(extensionsBytes); ok { - clearExtension(base, id) - ext := ebase.GetExtensions() - *ext = append(*ext, b...) - return - } - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if ea, ok := pbi.(slowExtensionAdapter); ok { - pbi = ea.extensionsBytes - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - buf := *ext - o := 0 - for o < len(buf) { - tag, n := DecodeVarint(buf[o:]) - fieldNum := int32(tag >> 3) - if int32(fieldNum) == extension.Field { - return true - } - wireType := int(tag & 0x7) - o += n - l, err := size(buf[o:], wireType) - if err != nil { - return false - } - o += l - } - return false - } - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - clearExtension(pb, extension.Field) -} - -func clearExtension(pb Message, fieldNum int32) { - if epb, ok := pb.(extensionsBytes); ok { - offset := 0 - for offset != -1 { - offset = deleteExtension(epb, fieldNum, offset) - } - return - } - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, fieldNum) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
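// HasExtension above takes a fast path for messages that store extensions as a
// raw byte slice: it walks the wire format directly, reading a tag varint,
// splitting it into field number and wire type, and skipping each payload. A
// standalone sketch of that walk, restricted to wire types 0 (varint) and
// 1 (fixed64); the helper name is hypothetical, not from this vendor tree.
package main

import (
	"encoding/binary"
	"fmt"
)

// hasField reports whether buf (assumed to be well-formed wire data using only
// wire types 0 and 1) contains a field with the given number.
func hasField(buf []byte, field uint64) bool {
	for len(buf) > 0 {
		tag, n := binary.Uvarint(buf)
		if n <= 0 {
			return false // malformed tag
		}
		buf = buf[n:]
		if tag>>3 == field {
			return true
		}
		switch tag & 7 {
		case 0: // varint payload: skip one varint
			_, m := binary.Uvarint(buf)
			if m <= 0 {
				return false
			}
			buf = buf[m:]
		case 1: // fixed64 payload: skip eight bytes
			if len(buf) < 8 {
				return false
			}
			buf = buf[8:]
		default:
			return false // other wire types omitted in this sketch
		}
	}
	return false
}

func main() {
	// field 1 = varint 150, field 2 = varint 1  ->  08 96 01 10 01
	wire := []byte{0x08, 0x96, 0x01, 0x10, 0x01}
	fmt.Println(hasField(wire, 2), hasField(wire, 3)) // true false
}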
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - return decodeExtensionFromBytes(extension, *ext) - } - - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if cerr := checkExtensionTypes(epb, extension); cerr != nil { - return nil, cerr - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return e.value, nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = v - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return e.value, nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. 
- value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, ok := pb.(extensionsBytes); ok { - ClearExtension(pb, extension) - newb, err := encodeExtension(extension, value) - if err != nil { - return err - } - bb := epb.GetExtensions() - *bb = append(*bb, newb...) - return nil - } - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - if epb, doki := pb.(extensionsBytes); doki { - ext := epb.GetExtensions() - *ext = []byte{} - return - } - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
- -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go deleted file mode 100644 index 6f1ae120ece..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ /dev/null @@ -1,389 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
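// RegisterExtension and RegisteredExtensions above implement a global registry
// keyed by the extended message's struct type, panicking on duplicate field
// numbers at init time. A standalone sketch of the same pattern; all names
// here are hypothetical, not from this vendor tree.
package main

import (
	"fmt"
	"reflect"
)

type extDesc struct{ field int32 }

var registry = map[reflect.Type]map[int32]*extDesc{}

// register mirrors RegisterExtension: extended is a nil pointer to the struct
// type being extended, used only to recover its reflect.Type.
func register(extended interface{}, d *extDesc) {
	t := reflect.TypeOf(extended).Elem()
	m := registry[t]
	if m == nil {
		m = map[int32]*extDesc{}
		registry[t] = m
	}
	if _, dup := m[d.field]; dup {
		panic(fmt.Sprintf("duplicate extension %d registered for %s", d.field, t))
	}
	m[d.field] = d
}

type myMsg struct{}

func main() {
	register((*myMsg)(nil), &extDesc{field: 100})
	fmt.Println(len(registry[reflect.TypeOf((*myMsg)(nil)).Elem()])) // 1
}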
- -package proto - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strings" - "sync" -) - -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - -type slowExtensionAdapter struct { - extensionsBytes -} - -func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { - panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") -} - -func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - b := s.GetExtensions() - m, err := BytesToExtensionsMap(*b) - if err != nil { - panic(err) - } - return m, notLocker{} -} - -func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { - if reflect.ValueOf(pb).IsNil() { - return ifnotset - } - value, err := GetExtension(pb, extension) - if err != nil { - return ifnotset - } - if value == nil { - return ifnotset - } - if value.(*bool) == nil { - return ifnotset - } - return *(value.(*bool)) -} - -func (this *Extension) Equal(that *Extension) bool { - if err := this.Encode(); err != nil { - return false - } - if err := that.Encode(); err != nil { - return false - } - return bytes.Equal(this.enc, that.enc) -} - -func (this *Extension) Compare(that *Extension) int { - if err := this.Encode(); err != nil { - return 1 - } - if err := that.Encode(); err != nil { - return -1 - } - return bytes.Compare(this.enc, that.enc) -} - -func SizeOfInternalExtension(m extendableProto) (n int) { - info := getMarshalInfo(reflect.TypeOf(m)) - return info.sizeV1Extensions(m.extensionsWrite()) -} - -type sortableMapElem struct { - field int32 - ext Extension -} - -func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { - s := make(sortableExtensions, 0, len(m)) - for k, v := range m { - s = append(s, &sortableMapElem{field: k, ext: v}) - } - return s -} - -type sortableExtensions []*sortableMapElem - -func (this sortableExtensions) Len() int { return len(this) } - -func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } - -func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } - -func (this sortableExtensions) String() string { - sort.Sort(this) - ss := make([]string, len(this)) - for i := range this { - ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) - } - return "map[" + strings.Join(ss, ",") + "]" -} - -func StringFromInternalExtension(m extendableProto) string { - return StringFromExtensionsMap(m.extensionsWrite()) -} - -func StringFromExtensionsMap(m map[int32]Extension) string { - return newSortableExtensionsFromMap(m).String() -} - -func StringFromExtensionsBytes(ext []byte) string { - m, err := BytesToExtensionsMap(ext) - if err != nil { - panic(err) - } - return StringFromExtensionsMap(m) -} - -func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMap(m.extensionsWrite(), data) -} - -func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { - return EncodeExtensionMapBackwards(m.extensionsWrite(), data) -} - -func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - o := 0 - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[o:], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - o += n - } - return o, nil -} - -func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { - o := 0 - end := len(data) - for _, e := range m { - if err := e.Encode(); err != nil { - return 0, err - } - n := copy(data[end-len(e.enc):], e.enc) - if n != len(e.enc) { - return 0, io.ErrShortBuffer - } - end -= n - o += n - } - return o, nil -} - -func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - e := m[id] - if err := e.Encode(); err != nil { - return nil, err - } - return e.enc, nil -} - -func size(buf []byte, wire int) (int, error) { - switch wire { - case WireVarint: - _, n := DecodeVarint(buf) - return n, nil - case WireFixed64: - return 8, nil - case WireBytes: - v, n := DecodeVarint(buf) - return int(v) + n, nil - case WireFixed32: - return 4, nil - case WireStartGroup: - offset := 0 - for { - u, n := DecodeVarint(buf[offset:]) - fwire := int(u & 0x7) - offset += n - if fwire == WireEndGroup { - return offset, nil - } - s, err := size(buf[offset:], wire) - if err != nil { - return 0, err - } - offset += s - } - } - return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) -} - -func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { - m := make(map[int32]Extension) - i := 0 - for i < len(buf) { - tag, n := DecodeVarint(buf[i:]) - if n <= 0 { - return nil, fmt.Errorf("unable to decode varint") - } - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size(buf[i+n:], wireType) - if err != nil { - return nil, err - } - end := i + int(l) + n - m[int32(fieldNum)] = Extension{enc: buf[i:end]} - i = end - } - return m, nil -} - -func NewExtension(e []byte) Extension { - ee := Extension{enc: make([]byte, len(e))} - copy(ee.enc, e) - return ee -} - -func AppendExtension(e Message, tag int32, buf []byte) { - if ee, eok := e.(extensionsBytes); eok { - ext := ee.GetExtensions() - *ext = append(*ext, buf...) - return - } - if ee, eok := e.(extendableProto); eok { - m := ee.extensionsWrite() - ext := m[int32(tag)] // may be missing - ext.enc = append(ext.enc, buf...) 
- m[int32(tag)] = ext - } -} - -func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { - u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) - ei := u.getExtElemInfo(extension) - v := value - p := toAddrPointer(&v, ei.isptr) - siz := ei.sizer(p, SizeVarint(ei.wiretag)) - buf := make([]byte, 0, siz) - return ei.marshaler(buf, p, ei.wiretag, false) -} - -func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { - o := 0 - for o < len(buf) { - tag, n := DecodeVarint((buf)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - if o+n > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - l, err := size((buf)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - if o+n+l > len(buf) { - return nil, fmt.Errorf("unable to decode extension") - } - v, err := decodeExtension((buf)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) -} - -func (this *Extension) Encode() error { - if this.enc == nil { - var err error - this.enc, err = encodeExtension(this.desc, this.value) - if err != nil { - return err - } - } - return nil -} - -func (this Extension) GoString() string { - if err := this.Encode(); err != nil { - return fmt.Sprintf("error encoding extension: %v", err) - } - return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) -} - -func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return errors.New("proto: bad extension number; not in declared ranges") - } - return SetExtension(pb, desc, value) -} - -func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { - typ := reflect.TypeOf(pb).Elem() - ext, ok := extensionMaps[typ] - if !ok { - return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) - } - desc, ok := ext[fieldNum] - if !ok { - return nil, fmt.Errorf("unregistered field number %d", fieldNum) - } - return GetExtension(pb, desc) -} - -func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { - x := &XXX_InternalExtensions{ - p: new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }), - } - x.p.extensionMap = m - return *x -} - -func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { - pb := extendable.(extendableProto) - return pb.extensionsWrite() -} - -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib.go deleted file mode 100644 index 80db1c155b5..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib.go +++ /dev/null @@ -1,973 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. 
HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. -Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/gogo/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m 
*Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/gogo/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - } - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. 
It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. -func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. 
-func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - sindex := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = sindex -} - -// SetDefaults 
sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a struct. -func setDefaults(v reflect.Value, recur, zeros bool) { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or T or []*T or []T - switch f.Kind() { - case reflect.Struct: - setDefaults(f, recur, zeros) - - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.Kind() == reflect.Ptr && e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. 
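
Aside on the setDefaults body above, not part of the vendored code: the key move is taking the address of a pointer field to get a **T, then storing a freshly allocated default only when the field is still nil. A self-contained sketch of that pattern; the Msg type and the default value are hypothetical.

package main

import (
	"fmt"
	"reflect"
)

// Msg is a hypothetical proto2-style struct; "hello" below stands in
// for a proto-declared default value.
type Msg struct {
	Name *string // nil means unset
}

func main() {
	m := &Msg{}
	f := reflect.ValueOf(m).Elem().Field(0)
	if f.IsNil() { // only touch fields that are still unset
		def := "hello"
		fptr := f.Addr().Interface().(**string) // the **T pattern used by setDefaults
		*fptr = &def
	}
	fmt.Println(*m.Name) // hello
}
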
-type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. -func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Struct: - nestedMessage = true // non-nullable - - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr, reflect.Struct: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. 
- sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. -func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. 
-func isProto3Zero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint32, reflect.Uint64:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.String:
- return v.String() == ""
- }
- return false
-}
-
-const (
- // GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion3 = true
-
- // GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion2 = true
-
- // GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
- // to assert that that code is compatible with this version of the proto package.
- GoGoProtoPackageIsVersion1 = true
-)
-
-// InternalMessageInfo is a type used internally by generated .pb.go files.
-// This type is not intended to be used by non-generated code.
-// This type is not subject to any compatibility guarantee.
-type InternalMessageInfo struct {
- marshal *marshalInfo
- unmarshal *unmarshalInfo
- merge *mergeInfo
- discard *discardInfo
-}
diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
deleted file mode 100644
index b3aa39190a1..00000000000
--- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- -package proto - -import ( - "encoding/json" - "strconv" -) - -type Sizer interface { - Size() int -} - -type ProtoSizer interface { - ProtoSize() int -} - -func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { - s, ok := m[value] - if !ok { - s = strconv.Itoa(int(value)) - } - return json.Marshal(s) -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/message_set.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/message_set.go deleted file mode 100644 index f48a756761e..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. 
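
Aside on the MessageSet wire format described above, not part of the vendored code: each Item is a group (field 1) containing a type_id varint (field 2) and the serialized message bytes (field 3). A self-contained sketch of that framing; the type ID and payload below are made up.

package main

import "fmt"

// appendVarint emits v in protobuf base-128 varint encoding.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	const (
		wireVarint     = 0
		wireBytes      = 2
		wireStartGroup = 3
		wireEndGroup   = 4
	)
	payload := []byte{0x08, 0x2a} // a pre-encoded message (made up)
	var b []byte
	b = appendVarint(b, 1<<3|wireStartGroup) // Item group start (field 1)
	b = appendVarint(b, 2<<3|wireVarint)     // type_id (field 2)
	b = appendVarint(b, 12345)               // the message's type ID (made up)
	b = appendVarint(b, 3<<3|wireBytes)      // message (field 3)
	b = appendVarint(b, uint64(len(payload)))
	b = append(b, payload...)
	b = appendVarint(b, 1<<3|wireEndGroup) // Item group end
	fmt.Printf("% x\n", b)
}
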
-type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. - -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go deleted file mode 100644 index b6cad90834b..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,357 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. 
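
Aside on grow, documented just above and defined just below, not part of the vendored code: it lengthens an addressable slice value through reflect, reusing spare capacity before falling back to reflect.Append. A self-contained usage sketch under those assumptions.

package main

import (
	"fmt"
	"reflect"
)

// grow mirrors the deleted helper: reuse spare capacity when possible,
// otherwise append a zero element, and return the new (addressable) slot.
func grow(s reflect.Value) reflect.Value {
	n, m := s.Len(), s.Cap()
	if n < m {
		s.SetLen(n + 1)
	} else {
		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
	}
	return s.Index(n)
}

func main() {
	xs := make([]int32, 0, 1)
	v := reflect.ValueOf(&xs).Elem() // an addressable slice value, as grow requires
	grow(v).SetInt(7)                // fits in the existing capacity
	grow(v).SetInt(8)                // forces reflect.Append
	fmt.Println(xs)                  // [7 8]
}
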
-func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! - p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. 
- slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? 
- return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go deleted file mode 100644 index 7ffd3c29d90..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ /dev/null @@ -1,59 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" -) - -// TODO: untested, so probably incorrect. 
- -func (p pointer) getRef() pointer { - return pointer{v: p.v.Addr()} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index d55a335d945..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,308 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. 
-type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr bool) pointer { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } - // The interface is not of pointer type. The data word is the pointer - // to the data. - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. - /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
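
Aside on pointer.offset above, not part of the vendored code: the unsafe implementation is plain pointer arithmetic, struct base address plus field offset, reinterpreted as a typed pointer to the field. A self-contained sketch using unsafe.Add (Go 1.17+); the T type is hypothetical.

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// T is a hypothetical struct standing in for a generated message.
type T struct {
	A int32
	B string
}

func main() {
	t := &T{A: 7, B: "x"}
	f, _ := reflect.TypeOf(*t).FieldByName("B")
	// Base pointer plus field offset: the same arithmetic that
	// pointer.offset performs on its field argument.
	p := unsafe.Add(unsafe.Pointer(t), f.Offset)
	fmt.Println(*(*string)(p)) // x
}
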
-func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. 
-func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go deleted file mode 100644 index aca8eed02a1..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ /dev/null @@ -1,56 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. 
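
Aside on the atomicLoad/Store helpers above, not part of the vendored code: they implement lazy, race-safe caching of per-type marshal metadata. A self-contained sketch of the same pattern using sync/atomic's typed Pointer (Go 1.19+), which today replaces the hand-rolled unsafe casts; marshalInfo here is a hypothetical stand-in.

package main

import (
	"fmt"
	"sync/atomic"
)

// marshalInfo is a hypothetical stand-in for the per-type metadata cached here.
type marshalInfo struct{ fields int }

var cached atomic.Pointer[marshalInfo]

func getInfo() *marshalInfo {
	if mi := cached.Load(); mi != nil {
		return mi // fast path: metadata already computed
	}
	mi := &marshalInfo{fields: 3} // stands in for an expensive computation
	cached.Store(mi)
	return mi
}

func main() {
	fmt.Println(getInfo().fields, getInfo().fields) // 3 3
}
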
- -package proto - -import ( - "reflect" - "unsafe" -) - -func (p pointer) getRef() pointer { - return pointer{p: (unsafe.Pointer)(&p.p)} -} - -func (p pointer) appendRef(v pointer, typ reflect.Type) { - slice := p.getSlice(typ) - elem := v.asPointerTo(typ).Elem() - newSlice := reflect.Append(slice, elem) - slice.Set(newSlice) -} - -func (p pointer) getSlice(typ reflect.Type) reflect.Value { - sliceTyp := reflect.SliceOf(typ) - slice := p.asPointerTo(sliceTyp) - slice = slice.Elem() - return slice -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties.go deleted file mode 100644 index 28da1475fb3..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties.go +++ /dev/null @@ -1,610 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. 
-const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. -type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - CastType string - StdTime bool - StdDuration bool - WktPointer bool - - stype reflect.Type // set for struct types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. 
-func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - case strings.HasPrefix(f, "embedded="): - p.OrigName = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "customtype="): - p.CustomType = strings.Split(f, "=")[1] - case strings.HasPrefix(f, "casttype="): - p.CastType = strings.Split(f, "=")[1] - case f == "stdtime": - p.StdTime = true - case f == "stdduration": - p.StdDuration = true - case f == "wktptr": - p.WktPointer = true - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
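
Aside on Properties.Parse above, not part of the vendored code: it consumes the "protobuf" struct tags emitted by the generator, in the form "wire,tag,modifiers,name=...,def=...". A self-contained peek at such a tag; the Msg field and its tag string are hypothetical but follow that format.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// Msg carries a hypothetical generated tag in the format Parse consumes.
type Msg struct {
	Name *string `protobuf:"bytes,1,opt,name=name,def=hello!"`
}

func main() {
	f, _ := reflect.TypeOf(Msg{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")
	parts := strings.SplitN(tag, ",", 4)
	fmt.Println(parts[0], parts[1], parts[2]) // bytes 1 opt
	fmt.Println(parts[3])                     // name=name,def=hello!
}
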
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - isMap := typ.Kind() == reflect.Map - if len(p.CustomType) > 0 && !isMap { - p.ctype = typ - p.setTag(lockGetProp) - return - } - if p.StdTime && !isMap { - p.setTag(lockGetProp) - return - } - if p.StdDuration && !isMap { - p.setTag(lockGetProp) - return - } - if p.WktPointer && !isMap { - p.setTag(lockGetProp) - return - } - switch t1 := typ; t1.Kind() { - case reflect.Struct: - p.stype = typ - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - case reflect.Struct: - p.stype = t3 - } - case reflect.Struct: - p.stype = t2 - } - - case reflect.Map: - - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - - p.MapValProp.CustomType = p.CustomType - p.MapValProp.StdDuration = p.StdDuration - p.MapValProp.StdTime = p.StdTime - p.MapValProp.WktPointer = p.WktPointer - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - p.setTag(lockGetProp) -} - -func (p *Properties) setTag(lockGetProp bool) { - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. 
- propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - isOneofMessage := false - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - isOneofMessage = true - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. - sort.Sort(prop) - - if isOneofMessage { - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) -var enumStringMaps = make(map[string]map[int32]string) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap - if _, ok := enumStringMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumStringMaps[typeName] = unusedNameMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). 
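
Aside on RegisterEnum and EnumValueMap above, not part of the vendored code: generated files register enum name/value maps at init time so the text format can resolve symbolic names later. A scaled-down, self-contained sketch of that registry; the enum name and values are hypothetical.

package main

import "fmt"

// A scaled-down registry mirroring enumValueMaps above.
var enumValueMaps = map[string]map[string]int32{}

// RegisterEnum mimics what generated code does at init time.
func RegisterEnum(typeName string, valueMap map[string]int32) {
	if _, ok := enumValueMaps[typeName]; ok {
		panic("proto: duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

func main() {
	RegisterEnum("example.Color", map[string]int32{"RED": 0, "BLUE": 1}) // hypothetical enum
	fmt.Println(enumValueMaps["example.Color"]["BLUE"])                  // 1
}
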
-var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties_gogo.go deleted file mode 100644 index 40ea3dd935c..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ /dev/null @@ -1,36 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() -var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/skip_gogo.go deleted file mode 100644 index 5a5fd93f7c1..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ /dev/null @@ -1,119 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -package proto - -import ( - "fmt" - "io" -) - -func Skip(data []byte) (n int, err error) { - l := len(data) - index := 0 - for index < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - index++ - if data[index-1] < 0x80 { - break - } - } - return index, nil - case 1: - index += 8 - return index, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - index += length - return index, nil - case 3: - for { - var innerWire uint64 - var start int = index - for shift := uint(0); ; shift += 7 { - if index >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[index] - index++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := Skip(data[start:]) - if err != nil { - return 0, err - } - index = start + next - } - return index, nil - case 4: - return index, nil - case 5: - index += 4 - return index, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal.go deleted file mode 100644 index f8babdefab9..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,3009 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
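(For context on the wire format handled by the Skip function removed in the skip_gogo.go hunk above: it decodes a base-128 varint key, takes the low three bits as the wire type, and advances past the field payload accordingly. Below is a minimal, self-contained sketch of that varint/key step; it is illustrative only, and the decodeVarint helper is our name, not part of the vendored package.)

package main

import "fmt"

// decodeVarint re-implements, for illustration, the varint loop used by
// the deleted Skip function: each byte carries 7 payload bits and the
// high bit marks continuation.
func decodeVarint(data []byte) (v uint64, n int, ok bool) {
	for shift := uint(0); n < len(data); shift += 7 {
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, n, true
		}
	}
	return 0, 0, false // truncated input
}

func main() {
	// Key 0x08 = (field number 1)<<3 | wire type 0 (varint),
	// followed by the value 300 encoded as 0xAC 0x02.
	data := []byte{0x08, 0xAC, 0x02}
	key, n, _ := decodeVarint(data)
	fmt.Printf("field=%d wiretype=%d\n", key>>3, key&0x7) // field=1 wiretype=0
	v, _, _ := decodeVarint(data[n:])
	fmt.Println(v) // 300
}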
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements - - hassizer bool // has custom sizer - hasprotosizer bool // has custom protosizer - - bytesExtensions field // offset of XXX_extensions where the field type is []byte -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex - - uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - // Uses the message's Size method if available - if u.hassizer { - s := ptr.asPointerTo(u.typ).Interface().(Sizer) - return s.Size() - } - // Uses the message's ProtoSize method if available - if u.hasprotosizer { - s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) - return s.ProtoSize() - } - - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - n += len(s) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - if u.bytesExtensions.IsValid() { - s := *ptr.offset(u.bytesExtensions).toBytes() - b = append(b, s...) - } - for _, f := range u.fields { - if f.required { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.bytesExtensions = invalidField - u.sizecache = invalidField - isOneofMessage := false - - if reflect.PtrTo(t).Implements(sizerType) { - u.hassizer = true - } - if reflect.PtrTo(t).Implements(protosizerType) { - u.hasprotosizer = true - } - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Tag.Get("protobuf_oneof") != "" { - isOneofMessage = true - } - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - if f.Type.Kind() == reflect.Map { - u.v1extensions = toField(&f) - } else { - u.bytesExtensions = toField(&f) - } - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // get oneof implementers - var oneofImplementers []interface{} - // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler - if isOneofMessage { - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(t, tags, false, false) - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - isptr: t.Kind() == reflect.Ptr, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizr, - marshaler: marshalr, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - ctype := false - isTime := false - isDuration := false - isWktPointer := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - if strings.HasPrefix(tags[i], "customtype=") { - ctype = true - } - if tags[i] == "stdtime" { - isTime = true - } - if tags[i] == "stdduration" { - isDuration = true - } - if tags[i] == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - if !proto3 && !pointer && !slice { - nozero = false - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - if pointer { - return makeCustomPtrMarshaler(getMarshalInfo(t)) - } - return makeCustomMarshaler(getMarshalInfo(t)) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeTimePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeTimePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeTimeSliceMarshaler(getMarshalInfo(t)) - } - return makeTimeMarshaler(getMarshalInfo(t)) - } - - if isDuration { - if pointer { - if slice { - return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationPtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeDurationSliceMarshaler(getMarshalInfo(t)) - } - return makeDurationMarshaler(getMarshalInfo(t)) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdDoubleValueMarshaler(getMarshalInfo(t)) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdFloatValueMarshaler(getMarshalInfo(t)) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Uint32: - if pointer { - if slice { - return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBoolValueMarshaler(getMarshalInfo(t)) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdStringValueMarshaler(getMarshalInfo(t)) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) - } - if slice { - return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) - } - return makeStdBytesValueMarshaler(getMarshalInfo(t)) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, 
appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. 
- return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if pointer { - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } else { - if slice { - return makeMessageRefSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageRefMarshaler(getMarshalInfo(t)) - } - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. - -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize 
int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return 
n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} 
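(The sizeZigzag32*/sizeZigzag64* helpers above all apply the expression (uint32(v)<<1)^uint32(v>>31): zigzag encoding, which interleaves negative and positive values so small-magnitude negatives still encode as short varints. A standalone sketch of the mapping and its inverse follows; it is illustrative only, and the zigzag32/unzigzag32 names are ours, not part of the vendored package.)

package main

import "fmt"

// zigzag32 maps int32 to uint32 the way the deleted sizers above do:
// 0->0, -1->1, 1->2, -2->3, ... so |v| stays small on the wire.
func zigzag32(v int32) uint32 { return (uint32(v) << 1) ^ uint32(v>>31) }

// unzigzag32 inverts the mapping (the decode-path counterpart).
func unzigzag32(u uint32) int32 { return int32(u>>1) ^ -int32(u&1) }

func main() {
	for _, v := range []int32{0, -1, 1, -2, 2} {
		u := zigzag32(v)
		fmt.Printf("%d -> %d -> %d\n", v, u, unzigzag32(u))
	}
}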
-func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. 
- switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} -func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, 
nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() 
- if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - 
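// Not part of the vendored diff: the zigzag transform applied by the deleted
// appendZigzag* helpers above, pulled out as standalone functions. It maps
// signed values of small magnitude to small unsigned varints:
// 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
package main

import "fmt"

func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63) // arithmetic shift smears the sign bit
}

func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64, 63} {
		u := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag64(u))
	}
}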
return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
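// Not part of the vendored diff: the length-delimited string framing the
// deleted appendString*/appendUTF8String* helpers produce, for the common
// case of a short string (field number < 16 and length < 128, so key and
// length each fit in one byte). Note the behavior the originals implement:
// invalid UTF-8 is still written out in full and the error is surfaced
// afterwards, so one bad field never leaves a half-written buffer.
package main

import (
	"errors"
	"fmt"
	"unicode/utf8"
)

var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")

func appendShortString(b []byte, fieldNum byte, v string) ([]byte, error) {
	b = append(b, fieldNum<<3|2, byte(len(v))) // key (wire type 2) + length
	b = append(b, v...)
	if !utf8.ValidString(v) {
		return b, errInvalidUTF8 // buffer is complete; caller decides severity
	}
	return b, nil
}

func main() {
	b, err := appendShortString(nil, 2, "testing")
	fmt.Printf("%% x %v\n", b, err) // 12 07 74 65 73 74 69 6e 67 <nil>
}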
- return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. -func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. 
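// Not part of the vendored diff: the two framings contrasted by
// makeGroupMarshaler and makeMessageMarshaler above. A group is bracketed by
// start-group/end-group keys (wire types 3 and 4) with no length prefix,
// which is why the end tag is computed as wiretag+(WireEndGroup-WireStartGroup);
// an embedded message is a single length-delimited field, which is why its
// size must be known (and is cached) before the payload is written. Field
// numbers 1 and 3 are arbitrary illustration choices.
package main

import "fmt"

func main() {
	payload := []byte{1<<3 | 0, 150&0x7f | 0x80, 150 >> 7} // field 1, varint 150

	// Group form, field 3: no length prefix, delimited by tags.
	group := append([]byte{3<<3 | 3}, payload...) // WireStartGroup = 3
	group = append(group, 3<<3|4)                 // WireEndGroup = 4

	// Embedded-message form, field 3: length-prefixed.
	msg := append([]byte{3<<3 | 2, byte(len(payload))}, payload...)

	fmt.Printf("group:   %% x\nmessage: %% x\n", group, msg)
}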
-func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - tags := strings.Split(f.Tag.Get("protobuf"), ",") - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - stdOptions := false - for _, t := range tags { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "stdduration" { - valTags = append(valTags, t) - stdOptions = true - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). - // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. 
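// Not part of the vendored diff: what makeMapMarshaler above actually emits.
// Each map entry is framed like a tiny embedded message whose key is field 1
// and whose value is field 2 (the "tag of key = 1, tag of val = 2" comments),
// and -- as the deterministic branch shows -- stable output requires sorting
// the map keys first, since Go map iteration order is randomized. Sketch for
// a map[uint32]string with short keys and values; map field number 4 is an
// illustrative choice.
package main

import (
	"fmt"
	"sort"
)

func appendEntry(b []byte, k uint32, v string) []byte {
	entry := []byte{1<<3 | 0, byte(k)} // field 1: key (varint, assumes k < 128)
	entry = append(entry, 2<<3|2, byte(len(v)))
	entry = append(entry, v...)             // field 2: value (length-delimited)
	b = append(b, 4<<3|2, byte(len(entry))) // the map field itself
	return append(b, entry...)
}

func main() {
	m := map[uint32]string{2: "b", 1: "a"}
	keys := make([]uint32, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) // deterministic
	var b []byte
	for _, k := range keys {
		b = appendEntry(b, k, m[k])
	}
	fmt.Printf("%% x\n", b) // 22 05 08 01 12 01 61 22 05 08 02 12 01 62
}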
- p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
-func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. -func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
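// Not part of the vendored diff: the message-set item layout documented in
// the format comment above (a group at field 1 containing required int32
// type_id = 2 and required string message = 3), built by hand for a single
// extension. The type_id 12345 and the payload bytes are illustrative.
package main

import "fmt"

func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func appendMessageSetItem(b []byte, typeID uint64, msg []byte) []byte {
	b = append(b, 1<<3|3) // start group, field 1 (WireStartGroup = 3)
	b = append(b, 2<<3|0) // type_id, field 2, varint
	b = appendVarint(b, typeID)
	b = append(b, 3<<3|2) // message, field 3, length-delimited
	b = appendVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	return append(b, 1<<3|4) // end group (WireEndGroup = 4)
}

func main() {
	fmt.Printf("%% x\n", appendMessageSetItem(nil, 12345, []byte{1<<3 | 0, 1}))
	// 0b 10 b9 60 1a 02 08 01 0c
}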
- msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. 
-type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if p.deterministic { - if _, ok := pb.(Marshaler); ok { - return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) - } - } - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] - pp, err = m.XXX_Marshal(pp, p.deterministic) - p.buf = append(p.buf, pp...) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - var b []byte - b, err = m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go deleted file mode 100644 index 997f57c1e10..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go +++ /dev/null @@ -1,388 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
-// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" - "time" -) - -// makeMessageRefMarshaler differs a bit from makeMessageMarshaler -// It marshal a message T instead of a *T -func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - siz := u.size(ptr) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - b = appendVarint(b, wiretag) - siz := u.cachedsize(ptr) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, ptr, deterministic) - } -} - -// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler -// It marshals a slice of messages []T instead of []*T -func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - var err, errreq error - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - e := elem.Interface() - v := toAddrPointer(&e, false) - b = appendVarint(b, wiretag) - siz := u.size(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if err != nil { - if _, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. 
- if errreq == nil { - errreq = err - } - continue - } - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - - return b, errreq - } -} - -func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(u.typ).Interface().(custom) - siz := m.Size() - buf, err := m.Marshal() - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - return b, nil - } -} - -func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(time.Time) - ts, err := timestampProto(t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return 0 - } - siz := Size(ts) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*time.Time) - ts, err := timestampProto(*t) - if err != nil { - return nil, err - } - siz := Size(ts) - buf, err := Marshal(ts) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) - dur := durationProto(*d) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
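// Not part of the vendored diff: the convert-then-frame pattern the deleted
// makeDuration*/makeTime* marshalers above follow. A time.Duration is first
// split into the well-known Duration message's fields (seconds = 1,
// nanos = 2, both varints), and that encoding is then framed like any other
// embedded message. Negative durations would need 10-byte two's-complement
// varints and are left out of this sketch; field number 5 is illustrative.
package main

import (
	"fmt"
	"time"
)

func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

// durationBytes encodes a non-negative duration as a Duration message body.
func durationBytes(d time.Duration) []byte {
	secs := int64(d / time.Second)
	nanos := int64(d % time.Second)
	var b []byte
	if secs != 0 {
		b = appendVarint(append(b, 1<<3|0), uint64(secs))
	}
	if nanos != 0 {
		b = appendVarint(append(b, 2<<3|0), uint64(nanos))
	}
	return b
}

func main() {
	body := durationBytes(1500 * time.Millisecond) // 1 s + 500000000 ns
	framed := appendVarint([]byte{5<<3 | 2}, uint64(len(body)))
	fmt.Printf("%% x\n", append(framed, body...))
}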
- return b, nil - } -} - -func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(time.Duration) - dur := durationProto(d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - d := elem.Interface().(*time.Duration) - dur := durationProto(*d) - siz := Size(dur) - buf, err := Marshal(dur) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_merge.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_merge.go deleted file mode 100644 index 60dcf70d1e6..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_merge.go +++ /dev/null @@ -1,676 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. - merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? 
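// Not part of the vendored diff: the per-field merge rules that the
// reflection tables above are compiled into, written out by hand for one
// hypothetical message. Scalars are copied only when the source is non-zero
// (the isPointer/basicWidth fast path above skips exactly these no-op cases),
// repeated fields append, and byte slices are deep-copied so dst never
// aliases src.
package main

import "fmt"

type msg struct {
	Count int64
	Tags  []string
	Raw   []byte
}

func merge(dst, src *msg) {
	if src.Count != 0 {
		dst.Count = src.Count // zero source scalars never clobber dst
	}
	if src.Tags != nil {
		dst.Tags = append(dst.Tags, src.Tags...)
	}
	if src.Raw != nil {
		dst.Raw = append([]byte(nil), src.Raw...) // copy, don't alias
	}
}

func main() {
	dst := &msg{Count: 7, Tags: []string{"a"}}
	merge(dst, &msg{Tags: []string{"b"}, Raw: []byte{1}})
	fmt.Printf("%+v\n", *dst) // Count stays 7; Tags become [a b]
}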
- out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) - } - } - } - } - case reflect.Struct: - switch { - case isSlice && !isPointer: // E.g. []pb.T - mergeInfo := getMergeInfo(tf) - zero := reflect.Zero(tf) - mfi.merge = func(dst, src pointer) { - // TODO: Make this faster? 
- dstsp := dst.asPointerTo(f.Type) - dsts := dstsp.Elem() - srcs := src.asPointerTo(f.Type).Elem() - for i := 0; i < srcs.Len(); i++ { - dsts = reflect.Append(dsts, zero) - srcElement := srcs.Index(i).Addr() - dstElement := dsts.Index(dsts.Len() - 1).Addr() - mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) - } - if dsts.IsNil() { - dsts = reflect.MakeSlice(f.Type, 0, 0) - } - dstsp.Elem().Set(dsts) - } - case !isPointer: - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - mergeInfo.merge(dst, src) - } - case isSlice: // E.g., []*pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mergeInfo.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mergeInfo := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mergeInfo.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go deleted file mode 100644 index 937229386a2..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2249 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
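Editor's aside: the table_merge.go file removed above implements the documented proto Merge contract for generated messages: set scalars in src overwrite dst (non-zero for proto3), repeated fields are appended, and byte slices are deep-copied so the destination never aliases the source. A minimal sketch of those semantics on plain Go types; the `record` type and `merge` helper are illustrative stand-ins, not part of the library:

```go
package main

import "fmt"

// record stands in for a generated message; the field shapes mirror the
// cases handled by the deleted merge table (scalar, repeated, bytes).
type record struct {
	Name  string
	Tags  []string
	Blobs [][]byte
}

// merge applies the Merge semantics sketched above.
func merge(dst, src *record) {
	if src.Name != "" { // non-zero scalar in src wins
		dst.Name = src.Name
	}
	dst.Tags = append(dst.Tags, src.Tags...) // repeated fields append
	for _, b := range src.Blobs {
		// bytes are deep-copied so dst never aliases src
		dst.Blobs = append(dst.Blobs, append([]byte{}, b...))
	}
}

func main() {
	dst := &record{Name: "a", Tags: []string{"x"}}
	src := &record{Tags: []string{"y"}, Blobs: [][]byte{[]byte("z")}}
	merge(dst, src)
	fmt.Println(dst.Name, dst.Tags, len(dst.Blobs)) // a [x y] 1
}
```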
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1<<len(reqFields)-1 when all required fields are in b - unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away) - extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist - oldExtensions field // offset of old-form extensions field (of type map[int]Extension), or invalidField if it does not exist - bytesExtensions field // offset of XXX_extensions with type []byte - extensionRanges []ExtensionRange // if non-nil, the extension ranges in this message - isMessageSet bool // if true, implies extensionRanges is non-nil -} - -// An unmarshaler takes a stream of bytes and a pointer to a field of a message. -// It decodes the field, stores it at f, and returns the unused bytes. -// w is the wire encoding. -// b is the data after the tag and wire encoding have been read. -type unmarshaler func(b []byte, f pointer, w int) ([]byte, error) - -type unmarshalFieldInfo struct { - // location of the field in the proto message structure. - field field - - // function to unmarshal the data for the field. - unmarshal unmarshaler - - // if a required field, contains a single set bit at this field's index in the required field list. - reqMask uint64 - - name string // name of the field, for error reporting -} - -var ( - unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{} - unmarshalInfoLock sync.Mutex -) - -// getUnmarshalInfo returns the data structure which can be -// subsequently used to unmarshal a message of the given type. -// t is the type of the message (note: not pointer to message). -func getUnmarshalInfo(t reflect.Type) *unmarshalInfo { - // It would be correct to return a new unmarshalInfo -// unconditionally. We would end up allocating one -// per occurrence of that type as a message or submessage. -// We use a cache here just to reduce memory usage. - unmarshalInfoLock.Lock() - defer unmarshalInfoLock.Unlock() - u := unmarshalInfoMap[t] - if u == nil { - u = &unmarshalInfo{typ: t} - // Note: we just set the type here. The rest of the fields -// will be initialized on first use. - unmarshalInfoMap[t] = u - } - return u -} - -// unmarshal does the main work of unmarshaling a message. -// u provides type information used to unmarshal the message. -// m is a pointer to a protocol buffer message. -// b is a byte stream to unmarshal into m. -// This is top routine used when recursively unmarshaling submessages. -func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeUnmarshalInfo() - } - if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) - } - var reqMask uint64 // bitmask of required fields we've seen. - var errLater error - for len(b) > 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - if u.bytesExtensions.IsValid() { - z = m.offset(u.bytesExtensions).toBytes() - break - } - panic("no extensions field available") - } - } - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ.
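Editor's aside: the main loop above peels one varint key off the stream per field and splits it into a field number (high bits) and a wire type (low three bits) before dispatching on the tag. A self-contained sketch of that key decode, assuming nothing beyond the wire-format rules; `decodeKey` is an illustrative name, not the library's API:

```go
package main

import "fmt"

// decodeKey reads one varint-encoded field key and splits it into the
// field number (x >> 3) and wire type (x & 7), as the deleted loop does.
func decodeKey(b []byte) (fieldNum uint64, wireType int, n int) {
	var x uint64
	var shift uint
	for i := 0; i < len(b); i++ {
		x |= uint64(b[i]&0x7f) << shift
		if b[i] < 0x80 { // high bit clear: last byte of the varint
			return x >> 3, int(x & 7), i + 1
		}
		shift += 7
	}
	return 0, 0, 0 // truncated varint
}

func main() {
	// 0x08 is the key a marshaler emits for `int32 a = 1;`:
	// field number 1, wire type 0 (varint).
	f, w, n := decodeKey([]byte{0x08, 0x96, 0x01})
	fmt.Println(f, w, n) // 1 0 1
}
```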
-func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3. - u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - u.bytesExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { - u.oldExtensions = toField(&f) - continue - } else if f.Type == reflect.TypeOf(([]byte)(nil)) { - u.bytesExtensions = toField(&f) - continue - } - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. 
- // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler - if len(oneofFields) > 0 { - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 - - atomic.StoreInt32(&u.initialized, 1) -} - -// setTag stores the unmarshal information for the given tag. -// tag = tag # for field -// field/unmarshal = unmarshal info for that field. -// reqMask = if required, bitmask for field position in required field list. 0 otherwise. -// name = short name of the field. -func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { - i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} - n := u.typ.NumField() - if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
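Editor's aside: setTag above keeps two lookup structures: a slice indexed directly by tag for small field numbers, and a map fallback for large or sparse ones. That is what the hot dispatch in the unmarshal loop consults. A reduced sketch of the layout; `fieldInfo` and `table` are illustrative names:

```go
package main

import "fmt"

// fieldInfo stands in for the per-field unmarshal record.
type fieldInfo struct{ name string }

// table mirrors the dense-slice-plus-sparse-map strategy of setTag.
type table struct {
	dense  []fieldInfo
	sparse map[uint64]fieldInfo
}

func (t *table) set(tag int, fi fieldInfo) {
	if tag < 16 { // heuristic cut-off, as in the deleted code
		for len(t.dense) <= tag {
			t.dense = append(t.dense, fieldInfo{})
		}
		t.dense[tag] = fi
		return
	}
	if t.sparse == nil {
		t.sparse = map[uint64]fieldInfo{}
	}
	t.sparse[uint64(tag)] = fi
}

func (t *table) get(tag uint64) fieldInfo {
	if tag < uint64(len(t.dense)) { // O(1) for common small tags
		return t.dense[tag]
	}
	return t.sparse[tag] // map fallback for rare large tags
}

func main() {
	var t table
	t.set(1, fieldInfo{"id"})
	t.set(1000, fieldInfo{"ext"})
	fmt.Println(t.get(1).name, t.get(1000).name) // id ext
}
```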
-func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - ctype := false - isTime := false - isDuration := false - isWktPointer := false - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - if strings.HasPrefix(tag, "customtype=") { - ctype = true - } - if tag == "stdtime" { - isTime = true - } - if tag == "stdduration" { - isDuration = true - } - if tag == "wktptr" { - isWktPointer = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - if ctype { - if reflect.PtrTo(t).Implements(customType) { - if slice { - return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) - } - if pointer { - return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalCustom(getUnmarshalInfo(t), name) - } else { - panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) - } - } - - if isTime { - if pointer { - if slice { - return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalTime(getUnmarshalInfo(t), name) - } - - if isDuration { - if pointer { - if slice { - return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) - } - if slice { - return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalDuration(getUnmarshalInfo(t), name) - } - - if isWktPointer { - switch t.Kind() { - case reflect.Float64: - if pointer { - if slice { - return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Float32: - if pointer { - if slice { - return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int64: - if pointer { - if slice { - return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint64: - if pointer { - if slice { - return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Int32: - if pointer { - if slice { - return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return 
makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Uint32: - if pointer { - if slice { - return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.Bool: - if pointer { - if slice { - return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) - case reflect.String: - if pointer { - if slice { - return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) - case uint8SliceType: - if pointer { - if slice { - return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) - } - if slice { - return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) - } - return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) - default: - panic(fmt.Sprintf("unknown wktpointer type %#v", t)) - } - } - - // We'll never have both pointer and slice for basic types. - if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - 
case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessage(getUnmarshalInfo(t), name) - } - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. - -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := 
int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, 
io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, 
io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. - // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. 
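Editor's aside: the sint32/sint64 decoders earlier in this file undo zigzag encoding with expressions like `int64(x>>1) ^ int64(x)<<63>>63`, mapping even varints to non-negative values and odd varints to negative ones so small magnitudes stay small on the wire. A round-trip sketch; the encode side is inferred from the wire-format spec, not from this file:

```go
package main

import "fmt"

// zigzagEncode maps signed values to unsigned ones so that small
// negative numbers stay small varints: 0,-1,1,-2 -> 0,1,2,3.
func zigzagEncode(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// zigzagDecode is the expression used by the deleted sint64 decoders.
func zigzagDecode(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150} {
		x := zigzagEncode(v)
		fmt.Println(v, "->", x, "->", zigzagDecode(x))
	}
}
```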
- v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, 
f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - 
// that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - tagArray := strings.Split(f.Tag.Get("protobuf"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - for _, t := range tagArray { - if strings.HasPrefix(t, "customtype=") { - valTags = append(valTags, t) - } - if t == "stdtime" { - valTags = append(valTags, t) - } - if t == "stdduration" { - valTags = append(valTags, t) - } - if t == "wktptr" { - valTags = append(valTags, t) - } - } - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
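Editor's aside: makeUnmarshalMap above relies on each map entry arriving as a length-delimited pseudo-message whose key is field 1 and whose value is field 2. A toy parser for exactly that shape, restricted to single-byte keys and single-byte varints for brevity; `parseEntry` is illustrative, not the library's API:

```go
package main

import "fmt"

// parseEntry decodes one map-entry body of the form the deleted
// makeUnmarshalMap handles: key = field 1 (string), value = field 2 (varint).
func parseEntry(b []byte) (key string, val uint64) {
	for len(b) > 0 {
		k := b[0] // single-byte field key is enough for this example
		b = b[1:]
		switch k >> 3 {
		case 1: // key: wire type 2, length-prefixed string
			n := int(b[0])
			key = string(b[1 : 1+n])
			b = b[1+n:]
		case 2: // value: wire type 0, single-byte varint here
			val = uint64(b[0])
			b = b[1:]
		default:
			return key, val
		}
	}
	return key, val
}

func main() {
	// map<string,int32>{"a": 5} encodes the entry body as below.
	fmt.Println(parseEntry([]byte{0x0a, 0x01, 'a', 0x10, 0x05})) // a 5
}
```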
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go deleted file mode 100644 index 00d6c7ad937..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go +++ /dev/null @@ -1,385 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "io" - "reflect" -) - -func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. 
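Editor's aside: the unrolled decodeVarint above trades a loop for straight-line code; together with encodeVarint it defines the base-128 encoding every field key and length prefix uses (seven payload bits per byte, least-significant group first, high bit marks continuation). A compact loop-based round trip under the same rules, for reference only:

```go
package main

import "fmt"

// encodeVarint appends x in base-128 varint form, low group first.
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80)) // continuation bit set
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeVarint reads the varint back; returns (0, 0) on truncated input.
func decodeVarint(b []byte) (x uint64, n int) {
	var shift uint
	for ; n < len(b); n++ {
		x |= uint64(b[n]&0x7f) << shift
		if b[n] < 0x80 {
			return x, n + 1
		}
		shift += 7
	}
	return 0, 0
}

func main() {
	b := encodeVarint(nil, 300)
	fmt.Printf("%% x -> % x\n", b)  // ac 02
	fmt.Println(decodeVarint(b))    // 300 2
}
```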
- // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f // gogo: changed from v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.New(sub.typ)) - m := s.Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := reflect.New(sub.typ) - c := m.Interface().(custom) - if err := c.Unmarshal(b[:x]); err != nil { - return nil, err - } - v := valToPointer(m) - f.appendRef(v, sub.typ) - return b[x:], nil - } -} - -func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - - m := f.asPointerTo(sub.typ).Interface().(custom) - if err := m.Unmarshal(b[:x]); err != nil { - return nil, err - } - return b[x:], nil - } -} - -func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, 
errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&t)) - return b[x:], nil - } -} - -func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &timestamp{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - t, err := timestampFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(t)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&d)) - return b[x:], nil - } -} - -func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(d)) - return b[x:], nil - } -} - -func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := 
f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&d)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &duration{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - d, err := durationFromProto(m) - if err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(d)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/text.go deleted file mode 100644 index 87416afe955..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text.go +++ /dev/null @@ -1,930 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
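Editor's note before text.go: every makeUnmarshal* helper deleted above performs the same length-delimited framing step — decode a varint length prefix, bounds-check it, then hand the next `length` bytes to a sub-unmarshaler. A minimal standalone sketch of that step, using a compact loop variant of varint decoding instead of the unrolled decodeVarint above (names here are illustrative, not part of the diff):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint is a loop variant of the unrolled decoder above: each byte
// contributes 7 bits, low bits first; the high bit marks continuation.
// A varint never spans more than 10 bytes for a uint64.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input or overlong encoding
}

// readDelimited mirrors the framing shared by the makeUnmarshal* helpers:
// varint length, then exactly that many bytes of payload.
func readDelimited(b []byte) (payload, rest []byte, err error) {
	x, n := decodeVarint(b)
	if n == 0 {
		return nil, nil, errors.New("unexpected EOF")
	}
	b = b[n:]
	if x > uint64(len(b)) {
		return nil, nil, errors.New("unexpected EOF")
	}
	return b[:x], b[x:], nil
}

func main() {
	// 0x03 length prefix followed by three payload bytes, then a stray byte.
	payload, rest, err := readDelimited([]byte{0x03, 'f', 'o', 'o', 0xff})
	fmt.Println(string(payload), rest, err) // foo [255] <nil>
}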
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" - "sync" - "time" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). 
-// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. -func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, v, props); err != nil { - return err - } - } else if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - if len(props.Enum) > 0 { - if err := tm.writeEnum(w, fv, props); err != nil { - return err - } - } else if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv - if pv.CanAddr() { - pv = sv.Addr() - } else { - pv = reflect.New(sv.Type()) - pv.Elem().Set(sv) - } - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - -// writeAny writes an arbitrary field. 
-func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - if props != nil { - if len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() - if err != nil { - return err - } - if err := writeString(w, string(data)); err != nil { - return err - } - return nil - } - } else if len(props.CastType) > 0 { - if _, ok := v.Interface().(interface { - String() string - }); ok { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - _, err := fmt.Fprintf(w, "%d", v.Interface()) - return err - } - } - } else if props.StdTime { - t, ok := v.Interface().(time.Time) - if !ok { - return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) - } - tproto, err := timestampProto(t) - if err != nil { - return err - } - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdTime = false - err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) - return err - } else if props.StdDuration { - d, ok := v.Interface().(time.Duration) - if !ok { - return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) - } - dproto := durationProto(d) - propsCopy := *props // Make a copy so that this is goroutine-safe - propsCopy.StdDuration = false - err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) - return err - } - } - - // Floats have special cases. - if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. 
- v = v.Addr() - } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. - switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, ferr := fmt.Fprintf(w, "/* %v */\n", err) - return ferr - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, werr := w.Write(endBraceNewline); werr != nil { - return werr - } - continue - } - if _, ferr := fmt.Fprint(w, tag); ferr != nil { - return ferr - } - if wire != WireStartGroup { - if err = w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err = w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { 
return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - e := pv.Interface().(Message) - - var m map[int32]Extension - var mu sync.Locker - if em, ok := e.(extensionsBytes); ok { - eb := em.GetExtensions() - var err error - m, err = BytesToExtensionsMap(*eb) - if err != nil { - return err - } - mu = notLocker{} - } else if _, ok := e.(extendableProto); ok { - ep, _ := extendable(e) - m, mu = ep.extensionsRead() - if m == nil { - return nil - } - } - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(e, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. - if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. 
- v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_gogo.go deleted file mode 100644 index 1d6c6aa0e41..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ /dev/null @@ -1,57 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
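Editor's note: text.go, deleted above, carried the entire reflection-based text-format writer (MarshalText, MarshalTextString, CompactText, CompactTextString). For reference, a minimal sketch of what it produced — assuming a hand-written Greeting type as a hypothetical stand-in for generated code; the marshaler only needs the protobuf struct tags and the three Message methods:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Greeting is a hypothetical stand-in for a generated message.
type Greeting struct {
	Name  string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	g := &Greeting{Name: "odo", Count: 2}
	// Multi-line form: one "name: value" pair per line.
	fmt.Print(proto.MarshalTextString(g))
	// Compact form: roughly `name:"odo" count:2` on a single line.
	fmt.Println(proto.CompactTextString(g))
}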
- -package proto - -import ( - "fmt" - "reflect" -) - -func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { - m, ok := enumStringMaps[props.Enum] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - key := int32(0) - if v.Kind() == reflect.Ptr { - key = int32(v.Elem().Int()) - } else { - key = int32(v.Int()) - } - s, ok := m[key] - if !ok { - if err := tm.writeAny(w, v, props); err != nil { - return err - } - } - _, err := fmt.Fprint(w, s) - return err -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_parser.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_parser.go deleted file mode 100644 index f85c0cc81a7..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. 
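Editor's note: text_parser.go, whose deletion starts here, is the matching reader for that format — UnmarshalText resets the message, tokenizes the input, and fills fields by name. A round-trip sketch under the same hypothetical Greeting stand-in as in the previous sketch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Same hypothetical stand-in for generated code as above.
type Greeting struct {
	Name  string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Count int32  `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
}

func (m *Greeting) Reset()         { *m = Greeting{} }
func (m *Greeting) String() string { return proto.CompactTextString(m) }
func (*Greeting) ProtoMessage()    {}

func main() {
	var g Greeting
	// The parser accepts "name: value" pairs; sub-messages may use <...>
	// or {...}, and # starts a comment.
	if err := proto.UnmarshalText(`name: "odo" count: 2`, &g); err != nil {
		panic(err)
	}
	fmt.Println(g.Name, g.Count) // odo 2
}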
- -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += 
len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. -func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. 
- messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. - for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. 
- if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. - if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. 
-func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - if len(props.CustomType) > 0 { - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - tc := reflect.TypeOf(new(Marshaler)) - ok := t.Elem().Implements(tc.Elem()) - if ok { - fv := v - flen := fv.Len() - if flen == fv.Cap() { - nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) - reflect.Copy(nav, fv) - fv.Set(nav) - } - fv.SetLen(flen + 1) - - // Read one. - p.back() - return p.readAny(fv.Index(flen), props) - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.ValueOf(custom)) - } else { - custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) - err := custom.Unmarshal([]byte(tok.unquoted)) - if err != nil { - return p.errorf("%v %v: %v", err, v.Type(), tok.value) - } - v.Set(reflect.Indirect(reflect.ValueOf(custom))) - } - return nil - } - if props.StdTime { - fv := v - p.back() - props.StdTime = false - tproto := &timestamp{} - err := p.readAny(reflect.ValueOf(tproto).Elem(), props) - props.StdTime = true - if err != nil { - return err - } - tim, err := timestampFromProto(tproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ts := fv.Interface().([]*time.Time) - ts = append(ts, &tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } else { - ts := fv.Interface().([]time.Time) - ts = append(ts, tim) - fv.Set(reflect.ValueOf(ts)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&tim)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&tim))) - } - return nil - } - if props.StdDuration { - fv := v - p.back() - props.StdDuration = false - dproto := &duration{} - err := p.readAny(reflect.ValueOf(dproto).Elem(), props) - props.StdDuration = true - if err != nil { - return err - } - dur, err := durationFromProto(dproto) - if err != nil { - return err - } - if props.Repeated { - t := reflect.TypeOf(v.Interface()) - if t.Kind() == reflect.Slice { - if t.Elem().Kind() == reflect.Ptr { - ds := fv.Interface().([]*time.Duration) - ds = append(ds, &dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } else { - ds := fv.Interface().([]time.Duration) - ds = append(ds, dur) - fv.Set(reflect.ValueOf(ds)) - return nil - } - } - } - if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { - v.Set(reflect.ValueOf(&dur)) - } else { - v.Set(reflect.Indirect(reflect.ValueOf(&dur))) - } - return nil - } - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. 
- return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - ntok := p.next() - if ntok.err != nil { - return ntok.err - } - if ntok.value == "]" { - break - } - if ntok.value != "," { - return p.errorf("Expected ']' or ',' found %q", ntok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. - switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int8: - if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int16: - if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { - fv.SetInt(x) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint8: - if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint16: - if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { - fv.SetUint(x) - return nil - } - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. 
-func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp.go deleted file mode 100644 index 9324f6542bc..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp.go +++ /dev/null @@ -1,113 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
-func validateTimestamp(ts *timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
-
-// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return value
-// is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-func timestampFromProto(ts *timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-func timestampProto(t time.Time) (*timestamp, error) {
-	seconds := t.Unix()
-	nanos := int32(t.Sub(time.Unix(seconds, 0)))
-	ts := &timestamp{
-		Seconds: seconds,
-		Nanos:   nanos,
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
deleted file mode 100644
index 38439fa9901..00000000000
--- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2016, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"reflect"
-	"time"
-)
-
-var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
-
-type timestamp struct {
-	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
-	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
-}
-
-func (m *timestamp) Reset()       { *m = timestamp{} }
-func (*timestamp) ProtoMessage()  {}
-func (*timestamp) String() string { return "timestamp" }
-
-func init() {
-	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
-}
diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers.go
deleted file mode 100644
index b175d1b6423..00000000000
--- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,1888 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2018, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-import (
-	"io"
-	"reflect"
-)
-
-func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
-	return func(ptr pointer, tagsize int) int {
-		t := ptr.asPointerTo(u.typ).Interface().(*float64)
-		v := &float64Value{*t}
-		siz := Size(v)
-		return tagsize + SizeVarint(uint64(siz)) + siz
-	}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
-		t := ptr.asPointerTo(u.typ).Interface().(*float64)
-		v := &float64Value{*t}
-		buf, err := Marshal(v)
-		if err != nil {
-			return nil, err
-		}
-		b = appendVarint(b, wiretag)
-		b = appendVarint(b, uint64(len(buf)))
-		b = append(b, buf...)
- return b, nil - } -} - -func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) - v := &float64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float64) - v := &float64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float64) - v := &float64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) - v := &float32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(float32) - v := &float32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*float32) - v := &float32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &float32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) - v := &int64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int64) - v := &int64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int64) - v := &int64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) - v := &uint64Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint64) - v := &uint64Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint64) - v := &uint64Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint64Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) - v := &int32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(int32) - v := &int32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*int32) - v := &int32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &int32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) - v := &uint32Value{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(uint32) - v := &uint32Value{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*uint32) - v := &uint32Value{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &uint32Value{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) - v := &boolValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(bool) - v := &boolValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*bool) - v := &boolValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &boolValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) - v := &stringValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(string) - v := &stringValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*string) - v := &stringValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &stringValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - t := ptr.asPointerTo(u.typ).Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) 
- return b, nil - } -} - -func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - if ptr.isNil() { - return 0 - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - return tagsize + SizeVarint(uint64(siz)) + siz - }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - if ptr.isNil() { - return b, nil - } - t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) - v := &bytesValue{*t} - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(buf))) - b = append(b, buf...) - return b, nil - } -} - -func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(u.typ) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(u.typ) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().([]byte) - v := &bytesValue{t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) - } - - return b, nil - } -} - -func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - n := 0 - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getSlice(reflect.PtrTo(u.typ)) - for i := 0; i < s.Len(); i++ { - elem := s.Index(i) - t := elem.Interface().(*[]byte) - v := &bytesValue{*t} - siz := Size(v) - buf, err := Marshal(v) - if err != nil { - return nil, err - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(siz)) - b = append(b, buf...) 
- } - - return b, nil - } -} - -func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(sub.typ).Elem() - s.Set(reflect.ValueOf(m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() - s.Set(reflect.ValueOf(&m.Value)) - return b[x:], nil - } -} - -func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(reflect.PtrTo(sub.typ)) - newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} - -func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return nil, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - m := &bytesValue{} - if err := Unmarshal(b[:x], m); err != nil { - return nil, err - } - slice := f.getSlice(sub.typ) - newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) - slice.Set(newSlice) - return b[x:], nil - } -} diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go deleted file mode 100644 index c1cf7bf85e9..00000000000 --- a/ui/wasm/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go +++ /dev/null @@ -1,113 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2018, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package proto
-
-type float64Value struct {
-	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float64Value) Reset()       { *m = float64Value{} }
-func (*float64Value) ProtoMessage()  {}
-func (*float64Value) String() string { return "float64" }
-
-type float32Value struct {
-	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *float32Value) Reset()       { *m = float32Value{} }
-func (*float32Value) ProtoMessage()  {}
-func (*float32Value) String() string { return "float32" }
-
-type int64Value struct {
-	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int64Value) Reset()       { *m = int64Value{} }
-func (*int64Value) ProtoMessage()  {}
-func (*int64Value) String() string { return "int64" }
-
-type uint64Value struct {
-	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint64Value) Reset()       { *m = uint64Value{} }
-func (*uint64Value) ProtoMessage()  {}
-func (*uint64Value) String() string { return "uint64" }
-
-type int32Value struct {
-	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *int32Value) Reset()       { *m = int32Value{} }
-func (*int32Value) ProtoMessage()  {}
-func (*int32Value) String() string { return "int32" }
-
-type uint32Value struct {
-	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *uint32Value) Reset()       { *m = uint32Value{} }
-func (*uint32Value) ProtoMessage()  {}
-func (*uint32Value) String() string { return "uint32" }
-
-type boolValue struct {
-	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *boolValue) Reset()       { *m = boolValue{} }
-func (*boolValue) ProtoMessage()  {}
-func (*boolValue) String() string { return "bool" }
-
-type stringValue struct {
-	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *stringValue) Reset()       { *m = stringValue{} }
-func (*stringValue) ProtoMessage()  {}
-func (*stringValue) String() string { return "string" }
-
-type bytesValue struct {
-	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *bytesValue) Reset()       { *m = bytesValue{} }
-func (*bytesValue) ProtoMessage()  {}
-func (*bytesValue) String() string { return "[]byte" }
-
-func init() {
-	RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
-	RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
-	RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
-	RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
-	RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
-	RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
-	RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
-	RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
-	RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
-}
diff --git a/ui/wasm/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/ui/wasm/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
deleted file mode 100644
index ceadde6a5e1..00000000000
--- a/ui/wasm/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Protocol Buffers for Go with Gadgets
-//
-// Copyright (c) 2013, The GoGo Authors. All rights reserved.
-// http://github.com/gogo/protobuf
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- -package sortkeys - -import ( - "sort" -) - -func Strings(l []string) { - sort.Strings(l) -} - -func Float64s(l []float64) { - sort.Float64s(l) -} - -func Float32s(l []float32) { - sort.Sort(Float32Slice(l)) -} - -func Int64s(l []int64) { - sort.Sort(Int64Slice(l)) -} - -func Int32s(l []int32) { - sort.Sort(Int32Slice(l)) -} - -func Uint64s(l []uint64) { - sort.Sort(Uint64Slice(l)) -} - -func Uint32s(l []uint32) { - sort.Sort(Uint32Slice(l)) -} - -func Bools(l []bool) { - sort.Sort(BoolSlice(l)) -} - -type BoolSlice []bool - -func (p BoolSlice) Len() int { return len(p) } -func (p BoolSlice) Less(i, j int) bool { return p[j] } -func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int64Slice []int64 - -func (p Int64Slice) Len() int { return len(p) } -func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Int32Slice []int32 - -func (p Int32Slice) Len() int { return len(p) } -func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Uint32Slice []uint32 - -func (p Uint32Slice) Len() int { return len(p) } -func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -type Float32Slice []float32 - -func (p Float32Slice) Len() int { return len(p) } -func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/ui/wasm/vendor/github.com/golang/mock/AUTHORS b/ui/wasm/vendor/github.com/golang/mock/AUTHORS deleted file mode 100644 index 660b8ccc8ae..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/AUTHORS +++ /dev/null @@ -1,12 +0,0 @@ -# This is the official list of GoMock authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Alex Reece -Google Inc. diff --git a/ui/wasm/vendor/github.com/golang/mock/CONTRIBUTORS b/ui/wasm/vendor/github.com/golang/mock/CONTRIBUTORS deleted file mode 100644 index def849cab1b..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute (and typically -# have contributed) code to the gomock repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. 
-# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name -# -# An entry with two email addresses specifies that the -# first address should be used in the submit logs and -# that the second address should be recognized as the -# same person when interacting with Rietveld. - -# Please keep the list sorted. - -Aaron Jacobs -Alex Reece -David Symonds -Ryan Barrett diff --git a/ui/wasm/vendor/github.com/golang/mock/LICENSE b/ui/wasm/vendor/github.com/golang/mock/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/golang/mock/gomock/call.go b/ui/wasm/vendor/github.com/golang/mock/gomock/call.go deleted file mode 100644 index 13c9f44b1ef..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/gomock/call.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gomock - -import ( - "fmt" - "reflect" - "strconv" - "strings" -) - -// Call represents an expected call to a mock. -type Call struct { - t TestHelper // for triggering test failures on invalid call setup - - receiver interface{} // the receiver of the method call - method string // the name of the method - methodType reflect.Type // the type of the method - args []Matcher // the args - origin string // file and line number of call setup - - preReqs []*Call // prerequisite calls - - // Expectations - minCalls, maxCalls int - - numCalls int // actual number made - - // actions are called when this Call is called. Each action gets the args and - // can set the return values by returning a non-nil slice. Actions run in the - // order they are created. - actions []func([]interface{}) []interface{} -} - -// newCall creates a *Call. It requires the method type in order to support -// unexported methods. -func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { - t.Helper() - - // TODO: check arity, types. - mArgs := make([]Matcher, len(args)) - for i, arg := range args { - if m, ok := arg.(Matcher); ok { - mArgs[i] = m - } else if arg == nil { - // Handle nil specially so that passing a nil interface value - // will match the typed nils of concrete args. 
- mArgs[i] = Nil() - } else { - mArgs[i] = Eq(arg) - } - } - - // callerInfo's skip should be updated if the number of calls between the user's test - // and this line changes, i.e. this code is wrapped in another anonymous function. - // 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test. - origin := callerInfo(3) - actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} { - // Synthesize the zero value for each of the return args' types. - rets := make([]interface{}, methodType.NumOut()) - for i := 0; i < methodType.NumOut(); i++ { - rets[i] = reflect.Zero(methodType.Out(i)).Interface() - } - return rets - }} - return &Call{t: t, receiver: receiver, method: method, methodType: methodType, - args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions} -} - -// AnyTimes allows the expectation to be called 0 or more times -func (c *Call) AnyTimes() *Call { - c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity - return c -} - -// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes -// was previously called with 1, MinTimes also sets the maximum number of calls to infinity. -func (c *Call) MinTimes(n int) *Call { - c.minCalls = n - if c.maxCalls == 1 { - c.maxCalls = 1e8 - } - return c -} - -// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was -// previously called with 1, MaxTimes also sets the minimum number of calls to 0. -func (c *Call) MaxTimes(n int) *Call { - c.maxCalls = n - if c.minCalls == 1 { - c.minCalls = 0 - } - return c -} - -// DoAndReturn declares the action to run when the call is matched. -// The return values from this function are returned by the mocked function. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) DoAndReturn(f interface{}) *Call { - // TODO: Check arity and types here, rather than dying badly elsewhere. - v := reflect.ValueOf(f) - - c.addAction(func(args []interface{}) []interface{} { - c.t.Helper() - vArgs := make([]reflect.Value, len(args)) - ft := v.Type() - if c.methodType.NumIn() != ft.NumIn() { - c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin) - return nil - } - for i := 0; i < len(args); i++ { - if args[i] != nil { - vArgs[i] = reflect.ValueOf(args[i]) - } else { - // Use the zero value for the arg. - vArgs[i] = reflect.Zero(ft.In(i)) - } - } - vRets := v.Call(vArgs) - rets := make([]interface{}, len(vRets)) - for i, ret := range vRets { - rets[i] = ret.Interface() - } - return rets - }) - return c -} - -// Do declares the action to run when the call is matched. The function's -// return values are ignored to retain backward compatibility. To use the -// return values call DoAndReturn. -// It takes an interface{} argument to support n-arity functions. -func (c *Call) Do(f interface{}) *Call { - // TODO: Check arity and types here, rather than dying badly elsewhere. 
- v := reflect.ValueOf(f) - - c.addAction(func(args []interface{}) []interface{} { - c.t.Helper() - if c.methodType.NumIn() != v.Type().NumIn() { - c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, v.Type().NumIn(), c.methodType.NumIn(), c.origin) - return nil - } - vArgs := make([]reflect.Value, len(args)) - ft := v.Type() - for i := 0; i < len(args); i++ { - if args[i] != nil { - vArgs[i] = reflect.ValueOf(args[i]) - } else { - // Use the zero value for the arg. - vArgs[i] = reflect.Zero(ft.In(i)) - } - } - v.Call(vArgs) - return nil - }) - return c -} - -// Return declares the values to be returned by the mocked function call. -func (c *Call) Return(rets ...interface{}) *Call { - c.t.Helper() - - mt := c.methodType - if len(rets) != mt.NumOut() { - c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]", - c.receiver, c.method, len(rets), mt.NumOut(), c.origin) - } - for i, ret := range rets { - if got, want := reflect.TypeOf(ret), mt.Out(i); got == want { - // Identical types; nothing to do. - } else if got == nil { - // Nil needs special handling. - switch want.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - // ok - default: - c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]", - i, c.receiver, c.method, want, c.origin) - } - } else if got.AssignableTo(want) { - // Assignable type relation. Make the assignment now so that the generated code - // can return the values with a type assertion. - v := reflect.New(want).Elem() - v.Set(reflect.ValueOf(ret)) - rets[i] = v.Interface() - } else { - c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]", - i, c.receiver, c.method, got, want, c.origin) - } - } - - c.addAction(func([]interface{}) []interface{} { - return rets - }) - - return c -} - -// Times declares the exact number of times a function call is expected to be executed. -func (c *Call) Times(n int) *Call { - c.minCalls, c.maxCalls = n, n - return c -} - -// SetArg declares an action that will set the nth argument's value, -// indirected through a pointer. Or, in the case of a slice, SetArg -// will copy value's elements into the nth argument. -func (c *Call) SetArg(n int, value interface{}) *Call { - c.t.Helper() - - mt := c.methodType - // TODO: This will break on variadic methods. - // We will need to check those at invocation time. - if n < 0 || n >= mt.NumIn() { - c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]", - n, mt.NumIn(), c.origin) - } - // Permit setting argument through an interface. - // In the interface case, we don't (nay, can't) check the type here. - at := mt.In(n) - switch at.Kind() { - case reflect.Ptr: - dt := at.Elem() - if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) { - c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]", - n, vt, dt, c.origin) - } - case reflect.Interface: - // nothing to do - case reflect.Slice: - // nothing to do - default: - c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]", - n, at, c.origin) - } - - c.addAction(func(args []interface{}) []interface{} { - v := reflect.ValueOf(value) - switch reflect.TypeOf(args[n]).Kind() { - case reflect.Slice: - setSlice(args[n], v) - default: - reflect.ValueOf(args[n]).Elem().Set(v) - } - return nil - }) - return c -} - -// isPreReq returns true if other is a direct or indirect prerequisite to c. 
-func (c *Call) isPreReq(other *Call) bool { - for _, preReq := range c.preReqs { - if other == preReq || preReq.isPreReq(other) { - return true - } - } - return false -} - -// After declares that the call may only match after preReq has been exhausted. -func (c *Call) After(preReq *Call) *Call { - c.t.Helper() - - if c == preReq { - c.t.Fatalf("A call isn't allowed to be its own prerequisite") - } - if preReq.isPreReq(c) { - c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq) - } - - c.preReqs = append(c.preReqs, preReq) - return c -} - -// Returns true if the minimum number of calls have been made. -func (c *Call) satisfied() bool { - return c.numCalls >= c.minCalls -} - -// Returns true if the maximum number of calls have been made. -func (c *Call) exhausted() bool { - return c.numCalls >= c.maxCalls -} - -func (c *Call) String() string { - args := make([]string, len(c.args)) - for i, arg := range c.args { - args[i] = arg.String() - } - arguments := strings.Join(args, ", ") - return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin) -} - -// Tests if the given call matches the expected call. -// If yes, returns nil. If no, returns error with message explaining why it does not match. -func (c *Call) matches(args []interface{}) error { - if !c.methodType.IsVariadic() { - if len(args) != len(c.args) { - return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", - c.origin, len(args), len(c.args)) - } - - for i, m := range c.args { - if !m.Matches(args[i]) { - return fmt.Errorf( - "expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v", - c.origin, i, formatGottenArg(m, args[i]), m, - ) - } - } - } else { - if len(c.args) < c.methodType.NumIn()-1 { - return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d", - c.origin, len(c.args), c.methodType.NumIn()-1) - } - if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) { - return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d", - c.origin, len(args), len(c.args)) - } - if len(args) < len(c.args)-1 { - return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d", - c.origin, len(args), len(c.args)-1) - } - - for i, m := range c.args { - if i < c.methodType.NumIn()-1 { - // Non-variadic args - if !m.Matches(args[i]) { - return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m) - } - continue - } - // The last arg has a possibility of a variadic argument, so let it branch - - // sample: Foo(a int, b int, c ...int) - if i < len(c.args) && i < len(args) { - if m.Matches(args[i]) { - // Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC) - // Got Foo(a, b) want Foo(matcherA, matcherB) - // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD) - continue - } - } - - // The number of actual args don't match the number of matchers, - // or the last matcher is a slice and the last arg is not. - // If this function still matches it is because the last matcher - // matches all the remaining arguments or the lack of any. - // Convert the remaining arguments, if any, into a slice of the - // expected type. 
- vArgsType := c.methodType.In(c.methodType.NumIn() - 1) - vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i) - for _, arg := range args[i:] { - vArgs = reflect.Append(vArgs, reflect.ValueOf(arg)) - } - if m.Matches(vArgs.Interface()) { - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher) - // Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any()) - // Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher) - break - } - // Wrong number of matchers or not match. Fail. - // Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE) - // Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD) - // Got Foo(a, b, c) want Foo(matcherA, matcherB) - - return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v", - c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i]) - } - } - - // Check that all prerequisite calls have been satisfied. - for _, preReqCall := range c.preReqs { - if !preReqCall.satisfied() { - return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v", - c.origin, preReqCall, c) - } - } - - // Check that the call is not exhausted. - if c.exhausted() { - return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin) - } - - return nil -} - -// dropPrereqs tells the expected Call to not re-check prerequisite calls any -// longer, and to return its current set. -func (c *Call) dropPrereqs() (preReqs []*Call) { - preReqs = c.preReqs - c.preReqs = nil - return -} - -func (c *Call) call() []func([]interface{}) []interface{} { - c.numCalls++ - return c.actions -} - -// InOrder declares that the given calls should occur in order. -func InOrder(calls ...*Call) { - for i := 1; i < len(calls); i++ { - calls[i].After(calls[i-1]) - } -} - -func setSlice(arg interface{}, v reflect.Value) { - va := reflect.ValueOf(arg) - for i := 0; i < v.Len(); i++ { - va.Index(i).Set(v.Index(i)) - } -} - -func (c *Call) addAction(action func([]interface{}) []interface{}) { - c.actions = append(c.actions, action) -} - -func formatGottenArg(m Matcher, arg interface{}) string { - got := fmt.Sprintf("%v (%T)", arg, arg) - if gs, ok := m.(GotFormatter); ok { - got = gs.Got(arg) - } - return got -} diff --git a/ui/wasm/vendor/github.com/golang/mock/gomock/callset.go b/ui/wasm/vendor/github.com/golang/mock/gomock/callset.go deleted file mode 100644 index 49dba787a40..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/gomock/callset.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2011 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gomock - -import ( - "bytes" - "errors" - "fmt" -) - -// callSet represents a set of expected calls, indexed by receiver and method -// name. -type callSet struct { - // Calls that are still expected. - expected map[callSetKey][]*Call - // Calls that have been exhausted. - exhausted map[callSetKey][]*Call -} - -// callSetKey is the key in the maps in callSet -type callSetKey struct { - receiver interface{} - fname string -} - -func newCallSet() *callSet { - return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)} -} - -// Add adds a new expected call. -func (cs callSet) Add(call *Call) { - key := callSetKey{call.receiver, call.method} - m := cs.expected - if call.exhausted() { - m = cs.exhausted - } - m[key] = append(m[key], call) -} - -// Remove removes an expected call. -func (cs callSet) Remove(call *Call) { - key := callSetKey{call.receiver, call.method} - calls := cs.expected[key] - for i, c := range calls { - if c == call { - // maintain order for remaining calls - cs.expected[key] = append(calls[:i], calls[i+1:]...) - cs.exhausted[key] = append(cs.exhausted[key], call) - break - } - } -} - -// FindMatch searches for a matching call. Returns error with explanation message if no call matched. -func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) { - key := callSetKey{receiver, method} - - // Search through the expected calls. - expected := cs.expected[key] - var callsErrors bytes.Buffer - for _, call := range expected { - err := call.matches(args) - if err != nil { - _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) - } else { - return call, nil - } - } - - // If we haven't found a match then search through the exhausted calls so we - // get useful error messages. - exhausted := cs.exhausted[key] - for _, call := range exhausted { - if err := call.matches(args); err != nil { - _, _ = fmt.Fprintf(&callsErrors, "\n%v", err) - continue - } - _, _ = fmt.Fprintf( - &callsErrors, "all expected calls for method %q have been exhausted", method, - ) - } - - if len(expected)+len(exhausted) == 0 { - _, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method) - } - - return nil, errors.New(callsErrors.String()) -} - -// Failures returns the calls that are not satisfied. -func (cs callSet) Failures() []*Call { - failures := make([]*Call, 0, len(cs.expected)) - for _, calls := range cs.expected { - for _, call := range calls { - if !call.satisfied() { - failures = append(failures, call) - } - } - } - return failures -} diff --git a/ui/wasm/vendor/github.com/golang/mock/gomock/controller.go b/ui/wasm/vendor/github.com/golang/mock/gomock/controller.go deleted file mode 100644 index f054200d56c..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/gomock/controller.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package gomock is a mock framework for Go. 
-// -// Standard usage: -// (1) Define an interface that you wish to mock. -// type MyInterface interface { -// SomeMethod(x int64, y string) -// } -// (2) Use mockgen to generate a mock from the interface. -// (3) Use the mock in a test: -// func TestMyThing(t *testing.T) { -// mockCtrl := gomock.NewController(t) -// defer mockCtrl.Finish() -// -// mockObj := something.NewMockMyInterface(mockCtrl) -// mockObj.EXPECT().SomeMethod(4, "blah") -// // pass mockObj to a real object and play with it. -// } -// -// By default, expected calls are not enforced to run in any particular order. -// Call order dependency can be enforced by use of InOrder and/or Call.After. -// Call.After can create more varied call order dependencies, but InOrder is -// often more convenient. -// -// The following examples create equivalent call order dependencies. -// -// Example of using Call.After to chain expected call order: -// -// firstCall := mockObj.EXPECT().SomeMethod(1, "first") -// secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall) -// mockObj.EXPECT().SomeMethod(3, "third").After(secondCall) -// -// Example of using InOrder to declare expected call order: -// -// gomock.InOrder( -// mockObj.EXPECT().SomeMethod(1, "first"), -// mockObj.EXPECT().SomeMethod(2, "second"), -// mockObj.EXPECT().SomeMethod(3, "third"), -// ) -package gomock - -import ( - "context" - "fmt" - "reflect" - "runtime" - "sync" -) - -// A TestReporter is something that can be used to report test failures. It -// is satisfied by the standard library's *testing.T. -type TestReporter interface { - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) -} - -// TestHelper is a TestReporter that has the Helper method. It is satisfied -// by the standard library's *testing.T. -type TestHelper interface { - TestReporter - Helper() -} - -// cleanuper is used to check if TestHelper also has the `Cleanup` method. A -// common pattern is to pass in a `*testing.T` to -// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup -// method. This can be utilized to call `Finish()` so the caller of this library -// does not have to. -type cleanuper interface { - Cleanup(func()) -} - -// A Controller represents the top-level control of a mock ecosystem. It -// defines the scope and lifetime of mock objects, as well as their -// expectations. It is safe to call Controller's methods from multiple -// goroutines. Each test should create a new Controller and invoke Finish via -// defer. -// -// func TestFoo(t *testing.T) { -// ctrl := gomock.NewController(t) -// defer ctrl.Finish() -// // .. -// } -// -// func TestBar(t *testing.T) { -// t.Run("Sub-Test-1", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. -// }) -// t.Run("Sub-Test-2", st) { -// ctrl := gomock.NewController(st) -// defer ctrl.Finish() -// // .. -// }) -// }) -type Controller struct { - // T should only be called within a generated mock. It is not intended to - // be used in user code and may be changed in future versions. T is the - // TestReporter passed in when creating the Controller via NewController. - // If the TestReporter does not implement a TestHelper it will be wrapped - // with a nopTestHelper. - T TestHelper - mu sync.Mutex - expectedCalls *callSet - finished bool -} - -// NewController returns a new Controller. It is the preferred way to create a -// Controller. 
-// -// New in go1.14+, if you are passing a *testing.T into this function you no -// longer need to call ctrl.Finish() in your test methods. -func NewController(t TestReporter) *Controller { - h, ok := t.(TestHelper) - if !ok { - h = &nopTestHelper{t} - } - ctrl := &Controller{ - T: h, - expectedCalls: newCallSet(), - } - if c, ok := isCleanuper(ctrl.T); ok { - c.Cleanup(func() { - ctrl.T.Helper() - ctrl.finish(true, nil) - }) - } - - return ctrl -} - -type cancelReporter struct { - t TestHelper - cancel func() -} - -func (r *cancelReporter) Errorf(format string, args ...interface{}) { - r.t.Errorf(format, args...) -} -func (r *cancelReporter) Fatalf(format string, args ...interface{}) { - defer r.cancel() - r.t.Fatalf(format, args...) -} - -func (r *cancelReporter) Helper() { - r.t.Helper() -} - -// WithContext returns a new Controller and a Context, which is cancelled on any -// fatal failure. -func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) { - h, ok := t.(TestHelper) - if !ok { - h = &nopTestHelper{t: t} - } - - ctx, cancel := context.WithCancel(ctx) - return NewController(&cancelReporter{t: h, cancel: cancel}), ctx -} - -type nopTestHelper struct { - t TestReporter -} - -func (h *nopTestHelper) Errorf(format string, args ...interface{}) { - h.t.Errorf(format, args...) -} -func (h *nopTestHelper) Fatalf(format string, args ...interface{}) { - h.t.Fatalf(format, args...) -} - -func (h nopTestHelper) Helper() {} - -// RecordCall is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call { - ctrl.T.Helper() - - recv := reflect.ValueOf(receiver) - for i := 0; i < recv.Type().NumMethod(); i++ { - if recv.Type().Method(i).Name == method { - return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...) - } - } - ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver) - panic("unreachable") -} - -// RecordCallWithMethodType is called by a mock. It should not be called by user code. -func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call { - ctrl.T.Helper() - - call := newCall(ctrl.T, receiver, method, methodType, args...) - - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - ctrl.expectedCalls.Add(call) - - return call -} - -// Call is called by a mock. It should not be called by user code. -func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} { - ctrl.T.Helper() - - // Nest this code so we can use defer to make sure the lock is released. - actions := func() []func([]interface{}) []interface{} { - ctrl.T.Helper() - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - - expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args) - if err != nil { - // callerInfo's skip should be updated if the number of calls between the user's test - // and this line changes, i.e. this code is wrapped in another anonymous function. - // 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test. - origin := callerInfo(3) - ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err) - } - - // Two things happen here: - // * the matching call no longer needs to check prerequite calls, - // * and the prerequite calls are no longer expected, so remove them. 
- preReqCalls := expected.dropPrereqs() - for _, preReqCall := range preReqCalls { - ctrl.expectedCalls.Remove(preReqCall) - } - - actions := expected.call() - if expected.exhausted() { - ctrl.expectedCalls.Remove(expected) - } - return actions - }() - - var rets []interface{} - for _, action := range actions { - if r := action(args); r != nil { - rets = r - } - } - - return rets -} - -// Finish checks to see if all the methods that were expected to be called -// were called. It should be invoked for each Controller. It is not idempotent -// and therefore can only be invoked once. -// -// New in go1.14+, if you are passing a *testing.T into NewController function you no -// longer need to call ctrl.Finish() in your test methods. -func (ctrl *Controller) Finish() { - // If we're currently panicking, probably because this is a deferred call. - // This must be recovered in the deferred function. - err := recover() - ctrl.finish(false, err) -} - -func (ctrl *Controller) finish(cleanup bool, panicErr interface{}) { - ctrl.T.Helper() - - ctrl.mu.Lock() - defer ctrl.mu.Unlock() - - if ctrl.finished { - if _, ok := isCleanuper(ctrl.T); !ok { - ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.") - } - return - } - ctrl.finished = true - - // Short-circuit, pass through the panic. - if panicErr != nil { - panic(panicErr) - } - - // Check that all remaining expected calls are satisfied. - failures := ctrl.expectedCalls.Failures() - for _, call := range failures { - ctrl.T.Errorf("missing call(s) to %v", call) - } - if len(failures) != 0 { - if !cleanup { - ctrl.T.Fatalf("aborting test due to missing call(s)") - return - } - ctrl.T.Errorf("aborting test due to missing call(s)") - } -} - -// callerInfo returns the file:line of the call site. skip is the number -// of stack frames to skip when reporting. 0 is callerInfo's call site. -func callerInfo(skip int) string { - if _, file, line, ok := runtime.Caller(skip + 1); ok { - return fmt.Sprintf("%s:%d", file, line) - } - return "unknown file" -} - -// isCleanuper checks it if t's base TestReporter has a Cleanup method. -func isCleanuper(t TestReporter) (cleanuper, bool) { - tr := unwrapTestReporter(t) - c, ok := tr.(cleanuper) - return c, ok -} - -// unwrapTestReporter unwraps TestReporter to the base implementation. -func unwrapTestReporter(t TestReporter) TestReporter { - tr := t - switch nt := t.(type) { - case *cancelReporter: - tr = nt.t - if h, check := tr.(*nopTestHelper); check { - tr = h.t - } - case *nopTestHelper: - tr = nt.t - default: - // not wrapped - } - return tr -} diff --git a/ui/wasm/vendor/github.com/golang/mock/gomock/matchers.go b/ui/wasm/vendor/github.com/golang/mock/gomock/matchers.go deleted file mode 100644 index 2822fb2c8c4..00000000000 --- a/ui/wasm/vendor/github.com/golang/mock/gomock/matchers.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2010 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package gomock - -import ( - "fmt" - "reflect" - "strings" -) - -// A Matcher is a representation of a class of values. -// It is used to represent the valid or expected arguments to a mocked method. -type Matcher interface { - // Matches returns whether x is a match. - Matches(x interface{}) bool - - // String describes what the matcher matches. - String() string -} - -// WantFormatter modifies the given Matcher's String() method to the given -// Stringer. This allows for control on how the "Want" is formatted when -// printing . -func WantFormatter(s fmt.Stringer, m Matcher) Matcher { - type matcher interface { - Matches(x interface{}) bool - } - - return struct { - matcher - fmt.Stringer - }{ - matcher: m, - Stringer: s, - } -} - -// StringerFunc type is an adapter to allow the use of ordinary functions as -// a Stringer. If f is a function with the appropriate signature, -// StringerFunc(f) is a Stringer that calls f. -type StringerFunc func() string - -// String implements fmt.Stringer. -func (f StringerFunc) String() string { - return f() -} - -// GotFormatter is used to better print failure messages. If a matcher -// implements GotFormatter, it will use the result from Got when printing -// the failure message. -type GotFormatter interface { - // Got is invoked with the received value. The result is used when - // printing the failure message. - Got(got interface{}) string -} - -// GotFormatterFunc type is an adapter to allow the use of ordinary -// functions as a GotFormatter. If f is a function with the appropriate -// signature, GotFormatterFunc(f) is a GotFormatter that calls f. -type GotFormatterFunc func(got interface{}) string - -// Got implements GotFormatter. -func (f GotFormatterFunc) Got(got interface{}) string { - return f(got) -} - -// GotFormatterAdapter attaches a GotFormatter to a Matcher. 
-func GotFormatterAdapter(s GotFormatter, m Matcher) Matcher { - return struct { - GotFormatter - Matcher - }{ - GotFormatter: s, - Matcher: m, - } -} - -type anyMatcher struct{} - -func (anyMatcher) Matches(interface{}) bool { - return true -} - -func (anyMatcher) String() string { - return "is anything" -} - -type eqMatcher struct { - x interface{} -} - -func (e eqMatcher) Matches(x interface{}) bool { - // In case, some value is nil - if e.x == nil || x == nil { - return reflect.DeepEqual(e.x, x) - } - - // Check if types assignable and convert them to common type - x1Val := reflect.ValueOf(e.x) - x2Val := reflect.ValueOf(x) - - if x1Val.Type().AssignableTo(x2Val.Type()) { - x1ValConverted := x1Val.Convert(x2Val.Type()) - return reflect.DeepEqual(x1ValConverted.Interface(), x2Val.Interface()) - } - - return false -} - -func (e eqMatcher) String() string { - return fmt.Sprintf("is equal to %v (%T)", e.x, e.x) -} - -type nilMatcher struct{} - -func (nilMatcher) Matches(x interface{}) bool { - if x == nil { - return true - } - - v := reflect.ValueOf(x) - switch v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice: - return v.IsNil() - } - - return false -} - -func (nilMatcher) String() string { - return "is nil" -} - -type notMatcher struct { - m Matcher -} - -func (n notMatcher) Matches(x interface{}) bool { - return !n.m.Matches(x) -} - -func (n notMatcher) String() string { - return "not(" + n.m.String() + ")" -} - -type assignableToTypeOfMatcher struct { - targetType reflect.Type -} - -func (m assignableToTypeOfMatcher) Matches(x interface{}) bool { - return reflect.TypeOf(x).AssignableTo(m.targetType) -} - -func (m assignableToTypeOfMatcher) String() string { - return "is assignable to " + m.targetType.Name() -} - -type allMatcher struct { - matchers []Matcher -} - -func (am allMatcher) Matches(x interface{}) bool { - for _, m := range am.matchers { - if !m.Matches(x) { - return false - } - } - return true -} - -func (am allMatcher) String() string { - ss := make([]string, 0, len(am.matchers)) - for _, matcher := range am.matchers { - ss = append(ss, matcher.String()) - } - return strings.Join(ss, "; ") -} - -type lenMatcher struct { - i int -} - -func (m lenMatcher) Matches(x interface{}) bool { - v := reflect.ValueOf(x) - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == m.i - default: - return false - } -} - -func (m lenMatcher) String() string { - return fmt.Sprintf("has length %d", m.i) -} - -type inAnyOrderMatcher struct { - x interface{} -} - -func (m inAnyOrderMatcher) Matches(x interface{}) bool { - given, ok := m.prepareValue(x) - if !ok { - return false - } - wanted, ok := m.prepareValue(m.x) - if !ok { - return false - } - - if given.Len() != wanted.Len() { - return false - } - - usedFromGiven := make([]bool, given.Len()) - foundFromWanted := make([]bool, wanted.Len()) - for i := 0; i < wanted.Len(); i++ { - wantedMatcher := Eq(wanted.Index(i).Interface()) - for j := 0; j < given.Len(); j++ { - if usedFromGiven[j] { - continue - } - if wantedMatcher.Matches(given.Index(j).Interface()) { - foundFromWanted[i] = true - usedFromGiven[j] = true - break - } - } - } - - missingFromWanted := 0 - for _, found := range foundFromWanted { - if !found { - missingFromWanted++ - } - } - extraInGiven := 0 - for _, used := range usedFromGiven { - if !used { - extraInGiven++ - } - } - - return extraInGiven == 0 && missingFromWanted == 0 -} - -func (m inAnyOrderMatcher) 
prepareValue(x interface{}) (reflect.Value, bool) { - xValue := reflect.ValueOf(x) - switch xValue.Kind() { - case reflect.Slice, reflect.Array: - return xValue, true - default: - return reflect.Value{}, false - } -} - -func (m inAnyOrderMatcher) String() string { - return fmt.Sprintf("has the same elements as %v", m.x) -} - -// Constructors - -// All returns a composite Matcher that returns true if and only all of the -// matchers return true. -func All(ms ...Matcher) Matcher { return allMatcher{ms} } - -// Any returns a matcher that always matches. -func Any() Matcher { return anyMatcher{} } - -// Eq returns a matcher that matches on equality. -// -// Example usage: -// Eq(5).Matches(5) // returns true -// Eq(5).Matches(4) // returns false -func Eq(x interface{}) Matcher { return eqMatcher{x} } - -// Len returns a matcher that matches on length. This matcher returns false if -// is compared to a type that is not an array, chan, map, slice, or string. -func Len(i int) Matcher { - return lenMatcher{i} -} - -// Nil returns a matcher that matches if the received value is nil. -// -// Example usage: -// var x *bytes.Buffer -// Nil().Matches(x) // returns true -// x = &bytes.Buffer{} -// Nil().Matches(x) // returns false -func Nil() Matcher { return nilMatcher{} } - -// Not reverses the results of its given child matcher. -// -// Example usage: -// Not(Eq(5)).Matches(4) // returns true -// Not(Eq(5)).Matches(5) // returns false -func Not(x interface{}) Matcher { - if m, ok := x.(Matcher); ok { - return notMatcher{m} - } - return notMatcher{Eq(x)} -} - -// AssignableToTypeOf is a Matcher that matches if the parameter to the mock -// function is assignable to the type of the parameter to this function. -// -// Example usage: -// var s fmt.Stringer = &bytes.Buffer{} -// AssignableToTypeOf(s).Matches(time.Second) // returns true -// AssignableToTypeOf(s).Matches(99) // returns false -// -// var ctx = reflect.TypeOf((*context.Context)(nil)).Elem() -// AssignableToTypeOf(ctx).Matches(context.Background()) // returns true -func AssignableToTypeOf(x interface{}) Matcher { - if xt, ok := x.(reflect.Type); ok { - return assignableToTypeOfMatcher{xt} - } - return assignableToTypeOfMatcher{reflect.TypeOf(x)} -} - -// InAnyOrder is a Matcher that returns true for collections of the same elements ignoring the order. -// -// Example usage: -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2}) // returns true -// InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 2}) // returns false -func InAnyOrder(x interface{}) Matcher { - return inAnyOrderMatcher{x} -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/AUTHORS b/ui/wasm/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd746c..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/ui/wasm/vendor/github.com/golang/protobuf/CONTRIBUTORS b/ui/wasm/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e9680..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. 
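The file deletions above drop the vendored copy of gomock: the Controller that tracks expected calls, the Call/callSet bookkeeping, and the argument matchers. For reference, the matcher constructors removed in matchers.go are self-contained and can be exercised without any generated mock. The sketch below is hypothetical usage, not part of this change: it assumes github.com/golang/mock/gomock still resolves via the module cache rather than the deleted ui/wasm/vendor tree, and it uses only constructors and behaviours documented in the deleted source.

package main

import (
	"fmt"

	"github.com/golang/mock/gomock" // assumed still available outside the deleted vendor tree
)

func main() {
	// Eq matches on equality (reflect.DeepEqual after assignable-type conversion).
	fmt.Println(gomock.Eq(5).Matches(5)) // true
	fmt.Println(gomock.Eq(5).Matches(4)) // false

	// Nil matches an untyped nil as well as nil chan/func/interface/map/ptr/slice values.
	var p *int
	fmt.Println(gomock.Nil().Matches(p)) // true

	// Len matches on the length of arrays, chans, maps, slices, and strings.
	fmt.Println(gomock.Len(3).Matches("abc")) // true

	// Not inverts a matcher; non-Matcher arguments are wrapped in Eq.
	fmt.Println(gomock.Not(gomock.Eq(5)).Matches(4)) // true

	// InAnyOrder compares collections while ignoring element order.
	fmt.Println(gomock.InAnyOrder([]int{1, 2, 3}).Matches([]int{1, 3, 2})) // true
}

In normal use these matchers are handed to a generated mock's EXPECT() recorder; Controller.Call (removed in controller.go above) matches each incoming invocation against them and then dispatches the Return/Do/DoAndReturn actions attached to the matched Call.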
diff --git a/ui/wasm/vendor/github.com/golang/protobuf/LICENSE b/ui/wasm/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f646931a46..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/buffer.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/buffer.go deleted file mode 100644 index e810e6fea12..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/buffer.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - WireVarint = 0 - WireFixed32 = 5 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 -) - -// EncodeVarint returns the varint encoded bytes of v. -func EncodeVarint(v uint64) []byte { - return protowire.AppendVarint(nil, v) -} - -// SizeVarint returns the length of the varint encoded bytes of v. -// This is equal to len(EncodeVarint(v)). -func SizeVarint(v uint64) int { - return protowire.SizeVarint(v) -} - -// DecodeVarint parses a varint encoded integer from b, -// returning the integer value and the length of the varint. -// It returns (0, 0) if there is a parse error. -func DecodeVarint(b []byte) (uint64, int) { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return 0, 0 - } - return v, n -} - -// Buffer is a buffer for encoding and decoding the protobuf wire format. -// It may be reused between invocations to reduce memory usage. -type Buffer struct { - buf []byte - idx int - deterministic bool -} - -// NewBuffer allocates a new Buffer initialized with buf, -// where the contents of buf are considered the unread portion of the buffer. 
-func NewBuffer(buf []byte) *Buffer { - return &Buffer{buf: buf} -} - -// SetDeterministic specifies whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (b *Buffer) SetDeterministic(deterministic bool) { - b.deterministic = deterministic -} - -// SetBuf sets buf as the internal buffer, -// where the contents of buf are considered the unread portion of the buffer. -func (b *Buffer) SetBuf(buf []byte) { - b.buf = buf - b.idx = 0 -} - -// Reset clears the internal buffer of all written and unread data. -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.idx = 0 -} - -// Bytes returns the internal buffer. -func (b *Buffer) Bytes() []byte { - return b.buf -} - -// Unread returns the unread portion of the buffer. -func (b *Buffer) Unread() []byte { - return b.buf[b.idx:] -} - -// Marshal appends the wire-format encoding of m to the buffer. -func (b *Buffer) Marshal(m Message) error { - var err error - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// Unmarshal parses the wire-format message in the buffer and -// places the decoded results in m. -// It does not reset m before unmarshaling. -func (b *Buffer) Unmarshal(m Message) error { - err := UnmarshalMerge(b.Unread(), m) - b.idx = len(b.buf) - return err -} - -type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } - -func (m *unknownFields) String() string { panic("not implemented") } -func (m *unknownFields) Reset() { panic("not implemented") } -func (m *unknownFields) ProtoMessage() { panic("not implemented") } - -// DebugPrint dumps the encoded bytes of b with a header and footer including s -// to stdout. This is only intended for debugging. -func (*Buffer) DebugPrint(s string, b []byte) { - m := MessageReflect(new(unknownFields)) - m.SetUnknown(b) - b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) - fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) -} - -// EncodeVarint appends an unsigned varint encoding to the buffer. -func (b *Buffer) EncodeVarint(v uint64) error { - b.buf = protowire.AppendVarint(b.buf, v) - return nil -} - -// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. -func (b *Buffer) EncodeZigzag32(v uint64) error { - return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) -} - -// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. 
-func (b *Buffer) EncodeZigzag64(v uint64) error { - return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) -} - -// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed32(v uint64) error { - b.buf = protowire.AppendFixed32(b.buf, uint32(v)) - return nil -} - -// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. -func (b *Buffer) EncodeFixed64(v uint64) error { - b.buf = protowire.AppendFixed64(b.buf, uint64(v)) - return nil -} - -// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. -func (b *Buffer) EncodeRawBytes(v []byte) error { - b.buf = protowire.AppendBytes(b.buf, v) - return nil -} - -// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. -// It does not validate whether v contains valid UTF-8. -func (b *Buffer) EncodeStringBytes(v string) error { - b.buf = protowire.AppendString(b.buf, v) - return nil -} - -// EncodeMessage appends a length-prefixed encoded message to the buffer. -func (b *Buffer) EncodeMessage(m Message) error { - var err error - b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) - b.buf, err = marshalAppend(b.buf, m, b.deterministic) - return err -} - -// DecodeVarint consumes an encoded unsigned varint from the buffer. -func (b *Buffer) DecodeVarint() (uint64, error) { - v, n := protowire.ConsumeVarint(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag32() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil -} - -// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. -func (b *Buffer) DecodeZigzag64() (uint64, error) { - v, err := b.DecodeVarint() - if err != nil { - return 0, err - } - return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil -} - -// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed32() (uint64, error) { - v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. -func (b *Buffer) DecodeFixed64() (uint64, error) { - v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) - if n < 0 { - return 0, protowire.ParseError(n) - } - b.idx += n - return uint64(v), nil -} - -// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. -// If alloc is specified, it returns a copy the raw bytes -// rather than a sub-slice of the buffer. -func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { - v, n := protowire.ConsumeBytes(b.buf[b.idx:]) - if n < 0 { - return nil, protowire.ParseError(n) - } - b.idx += n - if alloc { - v = append([]byte(nil), v...) - } - return v, nil -} - -// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. -// It does not validate whether the raw bytes contain valid UTF-8. -func (b *Buffer) DecodeStringBytes() (string, error) { - v, n := protowire.ConsumeString(b.buf[b.idx:]) - if n < 0 { - return "", protowire.ParseError(n) - } - b.idx += n - return v, nil -} - -// DecodeMessage consumes a length-prefixed message from the buffer. -// It does not reset m before unmarshaling. 
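// Aside: the EncodeZigzag*/DecodeZigzag* helpers above implement the standard
// zig-zag transform, which maps signed integers to unsigned ones so that small
// negative values stay small on the wire (0->0, -1->1, 1->2, -2->3, ...).
// A self-contained sketch of the 64-bit form, equivalent to the code above:
package main

import "fmt"

// The arithmetic right shift spreads the sign bit across all 64 positions.
func zigzagEncode64(v int64) uint64 { return uint64((v << 1) ^ (v >> 63)) }

// Inverse: drop the low bit, then XOR with all-ones if that bit was set.
func zigzagDecode64(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150} {
		u := zigzagEncode64(v)
		fmt.Println(v, "->", u, "->", zigzagDecode64(u)) // round-trips exactly
	}
}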
-func (b *Buffer) DecodeMessage(m Message) error { - v, err := b.DecodeRawBytes(false) - if err != nil { - return err - } - return UnmarshalMerge(v, m) -} - -// DecodeGroup consumes a message group from the buffer. -// It assumes that the start group marker has already been consumed and -// consumes all bytes until (and including the end group marker). -// It does not reset m before unmarshaling. -func (b *Buffer) DecodeGroup(m Message) error { - v, n, err := consumeGroup(b.buf[b.idx:]) - if err != nil { - return err - } - b.idx += n - return UnmarshalMerge(v, m) -} - -// consumeGroup parses b until it finds an end group marker, returning -// the raw bytes of the message (excluding the end group marker) and the -// the total length of the message (including the end group marker). -func consumeGroup(b []byte) ([]byte, int, error) { - b0 := b - depth := 1 // assume this follows a start group marker - for { - _, wtyp, tagLen := protowire.ConsumeTag(b) - if tagLen < 0 { - return nil, 0, protowire.ParseError(tagLen) - } - b = b[tagLen:] - - var valLen int - switch wtyp { - case protowire.VarintType: - _, valLen = protowire.ConsumeVarint(b) - case protowire.Fixed32Type: - _, valLen = protowire.ConsumeFixed32(b) - case protowire.Fixed64Type: - _, valLen = protowire.ConsumeFixed64(b) - case protowire.BytesType: - _, valLen = protowire.ConsumeBytes(b) - case protowire.StartGroupType: - depth++ - case protowire.EndGroupType: - depth-- - default: - return nil, 0, errors.New("proto: cannot parse reserved wire type") - } - if valLen < 0 { - return nil, 0, protowire.ParseError(valLen) - } - b = b[valLen:] - - if depth == 0 { - return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil - } - } -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/defaults.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/defaults.go deleted file mode 100644 index d399bf069c3..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/defaults.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// SetDefaults sets unpopulated scalar fields to their default values. -// Fields within a oneof are not set even if they have a default value. -// SetDefaults is recursively called upon any populated message fields. -func SetDefaults(m Message) { - if m != nil { - setDefaults(MessageReflect(m)) - } -} - -func setDefaults(m protoreflect.Message) { - fds := m.Descriptor().Fields() - for i := 0; i < fds.Len(); i++ { - fd := fds.Get(i) - if !m.Has(fd) { - if fd.HasDefault() && fd.ContainingOneof() == nil { - v := fd.Default() - if fd.Kind() == protoreflect.BytesKind { - v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes - } - m.Set(fd, v) - } - continue - } - } - - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - setDefaults(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - setDefaults(ls.Get(i).Message()) - } - } - // Handle map of messages. 
- case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - setDefaults(v.Message()) - return true - }) - } - } - return true - }) -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/deprecated.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index e8db57e097a..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - - protoV2 "google.golang.org/protobuf/proto" -) - -var ( - // Deprecated: No longer returned. - ErrNil = errors.New("proto: Marshal called with nil") - - // Deprecated: No longer returned. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") - - // Deprecated: No longer returned. - ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -) - -// Deprecated: Do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: Do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: Do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: Do not use. -func RegisterMessageSetType(Message, int32, string) {} - -// Deprecated: Do not use. -func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// Deprecated: Do not use. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// Deprecated: Do not use; this type existed for intenal-use only. -type InternalMessageInfo struct{} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) DiscardUnknown(m Message) { - DiscardUnknown(m) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { - return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. 
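// Aside: UnmarshalJSONEnum above accepts both JSON encodings that proto JSON
// has historically used for enums: the value name as a string ("STARTED") or
// the number as an int (1). A minimal sketch of the same dual decoding:
package main

import (
	"encoding/json"
	"fmt"
)

var names = map[string]int32{"UNKNOWN": 0, "STARTED": 1}

func decodeEnum(data []byte) (int32, error) {
	if len(data) > 0 && data[0] == '"' { // new style: enum by name
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return 0, err
		}
		v, ok := names[s]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum value %q", s)
		}
		return v, nil
	}
	var v int32 // old style: enum by number
	err := json.Unmarshal(data, &v)
	return v, err
}

func main() {
	fmt.Println(decodeEnum([]byte(`"STARTED"`))) // 1 <nil>
	fmt.Println(decodeEnum([]byte(`1`)))         // 1 <nil>
}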
-func (*InternalMessageInfo) Size(m Message) int { - return protoV2.Size(MessageV2(m)) -} - -// Deprecated: Do not use; this method existed for intenal-use only. -func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { - return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/discard.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index 2187e877fa4..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "google.golang.org/protobuf/reflect/protoreflect" -) - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -func DiscardUnknown(m Message) { - if m != nil { - discardUnknown(MessageReflect(m)) - } -} - -func discardUnknown(m protoreflect.Message) { - m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { - switch { - // Handle singular message. - case fd.Cardinality() != protoreflect.Repeated: - if fd.Message() != nil { - discardUnknown(m.Get(fd).Message()) - } - // Handle list of messages. - case fd.IsList(): - if fd.Message() != nil { - ls := m.Get(fd).List() - for i := 0; i < ls.Len(); i++ { - discardUnknown(ls.Get(i).Message()) - } - } - // Handle map of messages. - case fd.IsMap(): - if fd.MapValue().Message() != nil { - ms := m.Get(fd).Map() - ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { - discardUnknown(v.Message()) - return true - }) - } - } - return true - }) - - // Discard unknown fields. - if len(m.GetUnknown()) > 0 { - m.SetUnknown(nil) - } -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/extensions.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index 42fc120c972..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "errors" - "fmt" - "reflect" - - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -type ( - // ExtensionDesc represents an extension descriptor and - // is used to interact with an extension field in a message. - // - // Variables of this type are generated in code by protoc-gen-go. - ExtensionDesc = protoimpl.ExtensionInfo - - // ExtensionRange represents a range of message extensions. - // Used in code generated by protoc-gen-go. - ExtensionRange = protoiface.ExtensionRangeV1 - - // Deprecated: Do not use; this is an internal type. 
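// Aside: as the DiscardUnknown docs above describe, unrecognized fields survive
// unmarshaling. A runnable sketch with the v2 API, using google.protobuf.Empty
// (to which every field number is unknown); note SetUnknown(nil) clears only
// the top level, whereas DiscardUnknown above also recurses:
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Hand-encode field number 1 as a varint; Empty declares no fields,
	// so the bytes are preserved as an unknown field.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)

	m := &emptypb.Empty{}
	if err := proto.Unmarshal(b, m); err != nil {
		panic(err)
	}
	fmt.Println(len(m.ProtoReflect().GetUnknown())) // 2 (tag byte + value byte)
	m.ProtoReflect().SetUnknown(nil)
	fmt.Println(len(m.ProtoReflect().GetUnknown())) // 0
}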
- Extension = protoimpl.ExtensionFieldV1 - - // Deprecated: Do not use; this is an internal type. - XXX_InternalExtensions = protoimpl.ExtensionFields -) - -// ErrMissingExtension reports whether the extension was not present. -var ErrMissingExtension = errors.New("proto: missing extension") - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -// HasExtension reports whether the extension field is present in m -// either as an explicitly populated field or as an unknown field. -func HasExtension(m Message, xt *ExtensionDesc) (has bool) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return false - } - - // Check whether any populated known field matches the field number. - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - has = mr.Has(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - has = int32(fd.Number()) == xt.Field - return !has - }) - } - - // Check whether any unknown field matches the field number. - for b := mr.GetUnknown(); !has && len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - has = int32(num) == xt.Field - b = b[n:] - } - return has -} - -// ClearExtension removes the extension field from m -// either as an explicitly populated field or as an unknown field. -func ClearExtension(m Message, xt *ExtensionDesc) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - xtd := xt.TypeDescriptor() - if isValidExtension(mr.Descriptor(), xtd) { - mr.Clear(xtd) - } else { - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if int32(fd.Number()) == xt.Field { - mr.Clear(fd) - return false - } - return true - }) - } - clearUnknown(mr, fieldNum(xt.Field)) -} - -// ClearAllExtensions clears all extensions from m. -// This includes populated fields and unknown fields in the extension range. -func ClearAllExtensions(m Message) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { - if fd.IsExtension() { - mr.Clear(fd) - } - return true - }) - clearUnknown(mr, mr.Descriptor().ExtensionRanges()) -} - -// GetExtension retrieves a proto2 extended field from m. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes for the extension field. -func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Retrieve the unknown fields for this extension field. - var bo protoreflect.RawFields - for bi := mr.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if int32(num) == xt.Field { - bo = append(bo, bi[:n]...) - } - bi = bi[n:] - } - - // For type incomplete descriptors, only retrieve the unknown fields. - if xt.ExtensionType == nil { - return []byte(bo), nil - } - - // If the extension field only exists as unknown fields, unmarshal it. - // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. 
- xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - if !mr.Has(xtd) && len(bo) > 0 { - m2 := mr.New() - if err := (proto.UnmarshalOptions{ - Resolver: extensionResolver{xt}, - }.Unmarshal(bo, m2.Interface())); err != nil { - return nil, err - } - if m2.Has(xtd) { - mr.Set(xtd, m2.Get(xtd)) - clearUnknown(mr, fieldNum(xt.Field)) - } - } - - // Check whether the message has the extension field set or a default. - var pv protoreflect.Value - switch { - case mr.Has(xtd): - pv = mr.Get(xtd) - case xtd.HasDefault(): - pv = xtd.Default() - default: - return nil, ErrMissingExtension - } - - v := xt.InterfaceOf(pv) - rv := reflect.ValueOf(v) - if isScalarKind(rv.Kind()) { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - return v, nil -} - -// extensionResolver is a custom extension resolver that stores a single -// extension type that takes precedence over the global registry. -type extensionResolver struct{ xt protoreflect.ExtensionType } - -func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByName(field) -} - -func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { - if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { - return r.xt, nil - } - return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) -} - -// GetExtensions returns a list of the extensions values present in m, -// corresponding with the provided list of extension descriptors, xts. -// If an extension is missing in m, the corresponding value is nil. -func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return nil, errNotExtendable - } - - vs := make([]interface{}, len(xts)) - for i, xt := range xts { - v, err := GetExtension(m, xt) - if err != nil { - if err == ErrMissingExtension { - continue - } - return vs, err - } - vs[i] = v - } - return vs, nil -} - -// SetExtension sets an extension field in m to the provided value. -func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return errNotExtendable - } - - rv := reflect.ValueOf(v) - if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) - } - if rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) - } - if isScalarKind(rv.Elem().Kind()) { - v = rv.Elem().Interface() - } - } - - xtd := xt.TypeDescriptor() - if !isValidExtension(mr.Descriptor(), xtd) { - return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) - } - mr.Set(xtd, xt.ValueOf(v)) - clearUnknown(mr, fieldNum(xt.Field)) - return nil -} - -// SetRawExtension inserts b into the unknown fields of m. -// -// Deprecated: Use Message.ProtoReflect.SetUnknown instead. 
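// Aside: a sketch of how the extension accessors above fit together, assuming a
// hypothetical generated package "pb" with a proto2 message pb.MyMessage and a
// string extension descriptor pb.E_MyExt (both names invented for illustration):
//
//	m := &pb.MyMessage{}
//	if !proto.HasExtension(m, pb.E_MyExt) {
//		// The v1 API represents scalar extension values as pointers, hence proto.String.
//		if err := proto.SetExtension(m, pb.E_MyExt, proto.String("hello")); err != nil {
//			log.Fatal(err)
//		}
//	}
//	v, err := proto.GetExtension(m, pb.E_MyExt) // v is interface{} holding *string
//	if err == nil {
//		fmt.Println(*v.(*string)) // hello
//	}
//	proto.ClearExtension(m, pb.E_MyExt)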
-func SetRawExtension(m Message, fnum int32, b []byte) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return - } - - // Verify that the raw field is valid. - for b0 := b; len(b0) > 0; { - num, _, n := protowire.ConsumeField(b0) - if int32(num) != fnum { - panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) - } - b0 = b0[n:] - } - - ClearExtension(m, &ExtensionDesc{Field: fnum}) - mr.SetUnknown(append(mr.GetUnknown(), b...)) -} - -// ExtensionDescs returns a list of extension descriptors found in m, -// containing descriptors for both populated extension fields in m and -// also unknown fields of m that are in the extension range. -// For the later case, an type incomplete descriptor is provided where only -// the ExtensionDesc.Field field is populated. -// The order of the extension descriptors is undefined. -func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { - return nil, errNotExtendable - } - - // Collect a set of known extension descriptors. - extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) - mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - xt := fd.(protoreflect.ExtensionTypeDescriptor) - if xd, ok := xt.Type().(*ExtensionDesc); ok { - extDescs[fd.Number()] = xd - } - } - return true - }) - - // Collect a set of unknown extension descriptors. - extRanges := mr.Descriptor().ExtensionRanges() - for b := mr.GetUnknown(); len(b) > 0; { - num, _, n := protowire.ConsumeField(b) - if extRanges.Has(num) && extDescs[num] == nil { - extDescs[num] = nil - } - b = b[n:] - } - - // Transpose the set of descriptors into a list. - var xts []*ExtensionDesc - for num, xt := range extDescs { - if xt == nil { - xt = &ExtensionDesc{Field: int32(num)} - } - xts = append(xts, xt) - } - return xts, nil -} - -// isValidExtension reports whether xtd is a valid extension descriptor for md. -func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { - return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) -} - -// isScalarKind reports whether k is a protobuf scalar kind (except bytes). -// This function exists for historical reasons since the representation of -// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. -func isScalarKind(k reflect.Kind) bool { - switch k { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - return true - default: - return false - } -} - -// clearUnknown removes unknown fields from m where remover.Has reports true. -func clearUnknown(m protoreflect.Message, remover interface { - Has(protoreflect.FieldNumber) bool -}) { - var bo protoreflect.RawFields - for bi := m.GetUnknown(); len(bi) > 0; { - num, _, n := protowire.ConsumeField(bi) - if !remover.Has(num) { - bo = append(bo, bi[:n]...) 
- } - bi = bi[n:] - } - if bi := m.GetUnknown(); len(bi) != len(bo) { - m.SetUnknown(bo) - } -} - -type fieldNum protoreflect.FieldNumber - -func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { - return protoreflect.FieldNumber(n1) == n2 -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/properties.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index dcdc2202fad..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// StructProperties represents protocol buffer type information for a -// generated protobuf message in the open-struct API. -// -// Deprecated: Do not use. -type StructProperties struct { - // Prop are the properties for each field. - // - // Fields belonging to a oneof are stored in OneofTypes instead, with a - // single Properties representing the parent oneof held here. - // - // The order of Prop matches the order of fields in the Go struct. - // Struct fields that are not related to protobufs have a "XXX_" prefix - // in the Properties.Name and must be ignored by the user. - Prop []*Properties - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the protobuf field name. - OneofTypes map[string]*OneofProperties -} - -// Properties represents the type information for a protobuf message field. -// -// Deprecated: Do not use. -type Properties struct { - // Name is a placeholder name with little meaningful semantic value. - // If the name has an "XXX_" prefix, the entire Properties must be ignored. - Name string - // OrigName is the protobuf field name or oneof name. - OrigName string - // JSONName is the JSON name for the protobuf field. - JSONName string - // Enum is a placeholder name for enums. - // For historical reasons, this is neither the Go name for the enum, - // nor the protobuf name for the enum. - Enum string // Deprecated: Do not use. - // Weak contains the full name of the weakly referenced message. - Weak string - // Wire is a string representation of the wire type. - Wire string - // WireType is the protobuf wire type for the field. - WireType int - // Tag is the protobuf field number. - Tag int - // Required reports whether this is a required field. - Required bool - // Optional reports whether this is a optional field. - Optional bool - // Repeated reports whether this is a repeated field. - Repeated bool - // Packed reports whether this is a packed repeated field of scalars. - Packed bool - // Proto3 reports whether this field operates under the proto3 syntax. - Proto3 bool - // Oneof reports whether this field belongs within a oneof. - Oneof bool - - // Default is the default value in string form. - Default string - // HasDefault reports whether the field has a default value. - HasDefault bool - - // MapKeyProp is the properties for the key field for a map field. - MapKeyProp *Properties - // MapValProp is the properties for the value field for a map field. - MapValProp *Properties -} - -// OneofProperties represents the type information for a protobuf oneof. -// -// Deprecated: Do not use. 
-type OneofProperties struct { - // Type is a pointer to the generated wrapper type for the field value. - // This is nil for messages that are not in the open-struct API. - Type reflect.Type - // Field is the index into StructProperties.Prop for the containing oneof. - Field int - // Prop is the properties for the field. - Prop *Properties -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," + strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != "" { - s += ",json=" + p.JSONName - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if len(p.Weak) > 0 { - s += ",weak=" + p.Weak - } - if p.Proto3 { - s += ",proto3" - } - if p.Oneof { - s += ",oneof" - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(tag string) { - // For example: "bytes,49,opt,name=foo,def=hello!" - for len(tag) > 0 { - i := strings.IndexByte(tag, ',') - if i < 0 { - i = len(tag) - } - switch s := tag[:i]; { - case strings.HasPrefix(s, "name="): - p.OrigName = s[len("name="):] - case strings.HasPrefix(s, "json="): - p.JSONName = s[len("json="):] - case strings.HasPrefix(s, "enum="): - p.Enum = s[len("enum="):] - case strings.HasPrefix(s, "weak="): - p.Weak = s[len("weak="):] - case strings.Trim(s, "0123456789") == "": - n, _ := strconv.ParseUint(s, 10, 32) - p.Tag = int(n) - case s == "opt": - p.Optional = true - case s == "req": - p.Required = true - case s == "rep": - p.Repeated = true - case s == "varint" || s == "zigzag32" || s == "zigzag64": - p.Wire = s - p.WireType = WireVarint - case s == "fixed32": - p.Wire = s - p.WireType = WireFixed32 - case s == "fixed64": - p.Wire = s - p.WireType = WireFixed64 - case s == "bytes": - p.Wire = s - p.WireType = WireBytes - case s == "group": - p.Wire = s - p.WireType = WireStartGroup - case s == "packed": - p.Packed = true - case s == "proto3": - p.Proto3 = true - case s == "oneof": - p.Oneof = true - case strings.HasPrefix(s, "def="): - // The default tag is special in that everything afterwards is the - // default regardless of the presence of commas. - p.HasDefault = true - p.Default, i = tag[len("def="):], len(tag) - } - tag = strings.TrimPrefix(tag[i:], ",") - } -} - -// Init populates the properties from a protocol buffer struct tag. -// -// Deprecated: Do not use. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - - if typ != nil && typ.Kind() == reflect.Map { - p.MapKeyProp = new(Properties) - p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) - p.MapValProp = new(Properties) - p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) - } -} - -var propertiesCache sync.Map // map[reflect.Type]*StructProperties - -// GetProperties returns the list of properties for the type represented by t, -// which must be a generated protocol buffer message in the open-struct API, -// where protobuf message fields are represented by exported Go struct fields. -// -// Deprecated: Use protobuf reflection instead. 
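// Aside: Parse above consumes the `protobuf:"..."` struct tags that
// protoc-gen-go emits on generated message fields. A self-contained look at
// where those tags live (the demo struct is hand-written for illustration):
package main

import (
	"fmt"
	"reflect"
)

// Shaped like a generated field; the tag string is exactly what Parse reads.
type demo struct {
	Name string `protobuf:"bytes,1,opt,name=name,json=name,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(demo{}).FieldByName("Name")
	fmt.Println(f.Tag.Get("protobuf")) // bytes,1,opt,name=name,json=name,proto3
}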
-func GetProperties(t reflect.Type) *StructProperties { - if p, ok := propertiesCache.Load(t); ok { - return p.(*StructProperties) - } - p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) - return p.(*StructProperties) -} - -func newProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - - var hasOneof bool - prop := new(StructProperties) - - // Construct a list of properties for each field in the struct. - for i := 0; i < t.NumField(); i++ { - p := new(Properties) - f := t.Field(i) - tagField := f.Tag.Get("protobuf") - p.Init(f.Type, f.Name, tagField, &f) - - tagOneof := f.Tag.Get("protobuf_oneof") - if tagOneof != "" { - hasOneof = true - p.OrigName = tagOneof - } - - // Rename unrelated struct fields with the "XXX_" prefix since so much - // user code simply checks for this to exclude special fields. - if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { - p.Name = "XXX_" + p.Name - p.OrigName = "XXX_" + p.OrigName - } else if p.Weak != "" { - p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field - } - - prop.Prop = append(prop.Prop, p) - } - - // Construct a mapping of oneof field names to properties. - if hasOneof { - var oneofWrappers []interface{} - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) - } - if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { - oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) - } - if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { - if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { - oneofWrappers = m.ProtoMessageInfo().OneofWrappers - } - } - - prop.OneofTypes = make(map[string]*OneofProperties) - for _, wrapper := range oneofWrappers { - p := &OneofProperties{ - Type: reflect.ValueOf(wrapper).Type(), // *T - Prop: new(Properties), - } - f := p.Type.Elem().Field(0) - p.Prop.Name = f.Name - p.Prop.Parse(f.Tag.Get("protobuf")) - - // Determine the struct field that contains this oneof. - // Each wrapper is assignable to exactly one parent field. - var foundOneof bool - for i := 0; i < t.NumField() && !foundOneof; i++ { - if p.Type.AssignableTo(t.Field(i).Type) { - p.Field = i - foundOneof = true - } - } - if !foundOneof { - panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) - } - prop.OneofTypes[p.Prop.OrigName] = p - } - } - - return prop -} - -func (sp *StructProperties) Len() int { return len(sp.Prop) } -func (sp *StructProperties) Less(i, j int) bool { return false } -func (sp *StructProperties) Swap(i, j int) { return } diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/proto.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/proto.go deleted file mode 100644 index 5aee89c323e..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/proto.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package proto provides functionality for handling protocol buffer messages. -// In particular, it provides marshaling and unmarshaling between a protobuf -// message and the binary wire format. 
-// -// See https://developers.google.com/protocol-buffers/docs/gotutorial for -// more information. -// -// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoiface" - "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - ProtoPackageIsVersion1 = true - ProtoPackageIsVersion2 = true - ProtoPackageIsVersion3 = true - ProtoPackageIsVersion4 = true -) - -// GeneratedEnum is any enum type generated by protoc-gen-go -// which is a named int32 kind. -// This type exists for documentation purposes. -type GeneratedEnum interface{} - -// GeneratedMessage is any message type generated by protoc-gen-go -// which is a pointer to a named struct kind. -// This type exists for documentation purposes. -type GeneratedMessage interface{} - -// Message is a protocol buffer message. -// -// This is the v1 version of the message interface and is marginally better -// than an empty interface as it lacks any method to programatically interact -// with the contents of the message. -// -// A v2 message is declared in "google.golang.org/protobuf/proto".Message and -// exposes protobuf reflection as a first-class feature of the interface. -// -// To convert a v1 message to a v2 message, use the MessageV2 function. -// To convert a v2 message to a v1 message, use the MessageV1 function. -type Message = protoiface.MessageV1 - -// MessageV1 converts either a v1 or v2 message to a v1 message. -// It returns nil if m is nil. -func MessageV1(m GeneratedMessage) protoiface.MessageV1 { - return protoimpl.X.ProtoMessageV1Of(m) -} - -// MessageV2 converts either a v1 or v2 message to a v2 message. -// It returns nil if m is nil. -func MessageV2(m GeneratedMessage) protoV2.Message { - return protoimpl.X.ProtoMessageV2Of(m) -} - -// MessageReflect returns a reflective view for a message. -// It returns nil if m is nil. -func MessageReflect(m Message) protoreflect.Message { - return protoimpl.X.MessageOf(m) -} - -// Marshaler is implemented by messages that can marshal themselves. -// This interface is used by the following functions: Size, Marshal, -// Buffer.Marshal, and Buffer.EncodeMessage. -// -// Deprecated: Do not implement. -type Marshaler interface { - // Marshal formats the encoded bytes of the message. - // It should be deterministic and emit valid protobuf wire data. - // The caller takes ownership of the returned buffer. - Marshal() ([]byte, error) -} - -// Unmarshaler is implemented by messages that can unmarshal themselves. -// This interface is used by the following functions: Unmarshal, UnmarshalMerge, -// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. -// -// Deprecated: Do not implement. -type Unmarshaler interface { - // Unmarshal parses the encoded bytes of the protobuf wire input. - // The provided buffer is only valid for during method call. - // It should not reset the receiver message. - Unmarshal([]byte) error -} - -// Merger is implemented by messages that can merge themselves. -// This interface is used by the following functions: Clone and Merge. -// -// Deprecated: Do not implement. -type Merger interface { - // Merge merges the contents of src into the receiver message. - // It clones all data structures in src such that it aliases no mutable - // memory referenced by src. 
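// Aside: the MessageV1/MessageV2 bridges documented above convert between the
// two API generations without copying. A runnable sketch using the upstream
// github.com/golang/protobuf module, which exports the same functions:
package main

import (
	"fmt"

	protoV1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m2 := &emptypb.Empty{}        // a v2 message
	m1 := protoV1.MessageV1(m2)   // view it through the v1 interface
	back := protoV1.MessageV2(m1) // and convert back
	fmt.Println(back == m2)       // true: same underlying message, no copy
}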
- Merge(src Message) -} - -// RequiredNotSetError is an error type returned when -// marshaling or unmarshaling a message with missing required fields. -type RequiredNotSetError struct { - err error -} - -func (e *RequiredNotSetError) Error() string { - if e.err != nil { - return e.err.Error() - } - return "proto: required field not set" -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -func checkRequiredNotSet(m protoV2.Message) error { - if err := protoV2.CheckInitialized(m); err != nil { - return &RequiredNotSetError{err: err} - } - return nil -} - -// Clone returns a deep copy of src. -func Clone(src Message) Message { - return MessageV1(protoV2.Clone(MessageV2(src))) -} - -// Merge merges src into dst, which must be messages of the same type. -// -// Populated scalar fields in src are copied to dst, while populated -// singular messages in src are merged into dst by recursively calling Merge. -// The elements of every list field in src is appended to the corresponded -// list fields in dst. The entries of every map field in src is copied into -// the corresponding map field in dst, possibly replacing existing entries. -// The unknown fields of src are appended to the unknown fields of dst. -func Merge(dst, src Message) { - protoV2.Merge(MessageV2(dst), MessageV2(src)) -} - -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. -// -// Two messages are equal if they are the same protobuf message type, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. -// -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. -func Equal(x, y Message) bool { - return protoV2.Equal(MessageV2(x), MessageV2(y)) -} - -func isMessageSet(md protoreflect.MessageDescriptor) bool { - ms, ok := md.(interface{ IsMessageSet() bool }) - return ok && ms.IsMessageSet() -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/registry.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/registry.go deleted file mode 100644 index 066b4323b49..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/registry.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - "reflect" - "strings" - "sync" - - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoimpl" -) - -// filePath is the path to the proto source file. -type filePath = string // e.g., "google/protobuf/descriptor.proto" - -// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. 
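// Aside: a runnable illustration of the Merge and Equal semantics documented
// above, using the v2 API that these v1 shims delegate to:
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	dst := wrapperspb.String("old")
	src := wrapperspb.String("new")
	proto.Merge(dst, src)              // populated scalar fields in src overwrite dst
	fmt.Println(dst.GetValue())        // new
	fmt.Println(proto.Equal(dst, src)) // true: same type, same populated fields
}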
-type fileDescGZIP = []byte - -var fileCache sync.Map // map[filePath]fileDescGZIP - -// RegisterFile is called from generated code to register the compressed -// FileDescriptorProto with the file path for a proto source file. -// -// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. -func RegisterFile(s filePath, d fileDescGZIP) { - // Decompress the descriptor. - zr, err := gzip.NewReader(bytes.NewReader(d)) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) - } - - // Construct a protoreflect.FileDescriptor from the raw descriptor. - // Note that DescBuilder.Build automatically registers the constructed - // file descriptor with the v2 registry. - protoimpl.DescBuilder{RawDescriptor: b}.Build() - - // Locally cache the raw descriptor form for the file. - fileCache.Store(s, d) -} - -// FileDescriptor returns the compressed FileDescriptorProto given the file path -// for a proto source file. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. -func FileDescriptor(s filePath) fileDescGZIP { - if v, ok := fileCache.Load(s); ok { - return v.(fileDescGZIP) - } - - // Find the descriptor in the v2 registry. - var b []byte - if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { - b, _ = Marshal(protodesc.ToFileDescriptorProto(fd)) - } - - // Locally cache the raw descriptor form for the file. - if len(b) > 0 { - v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) - return v.(fileDescGZIP) - } - return nil -} - -// enumName is the name of an enum. For historical reasons, the enum name is -// neither the full Go name nor the full protobuf name of the enum. -// The name is the dot-separated combination of just the proto package that the -// enum is declared within followed by the Go type name of the generated enum. -type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" - -// enumsByName maps enum values by name to their numeric counterpart. -type enumsByName = map[string]int32 - -// enumsByNumber maps enum values by number to their name counterpart. -type enumsByNumber = map[int32]string - -var enumCache sync.Map // map[enumName]enumsByName -var numFilesCache sync.Map // map[protoreflect.FullName]int - -// RegisterEnum is called from the generated code to register the mapping of -// enum value names to enum numbers for the enum identified by s. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. -func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { - if _, ok := enumCache.Load(s); ok { - panic("proto: duplicate enum registered: " + s) - } - enumCache.Store(s, m) - - // This does not forward registration to the v2 registry since this API - // lacks sufficient information to construct a complete v2 enum descriptor. -} - -// EnumValueMap returns the mapping from enum value names to enum numbers for -// the enum of the given name. It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. -func EnumValueMap(s enumName) enumsByName { - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - - // Check whether the cache is stale. If the number of files in the current - // package differs, then it means that some enums may have been recently - // registered upstream that we do not know about. 
- var protoPkg protoreflect.FullName - if i := strings.LastIndexByte(s, '.'); i >= 0 { - protoPkg = protoreflect.FullName(s[:i]) - } - v, _ := numFilesCache.Load(protoPkg) - numFiles, _ := v.(int) - if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { - return nil // cache is up-to-date; was not found earlier - } - - // Update the enum cache for all enums declared in the given proto package. - numFiles = 0 - protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { - walkEnums(fd, func(ed protoreflect.EnumDescriptor) { - name := protoimpl.X.LegacyEnumName(ed) - if _, ok := enumCache.Load(name); !ok { - m := make(enumsByName) - evs := ed.Values() - for i := evs.Len() - 1; i >= 0; i-- { - ev := evs.Get(i) - m[string(ev.Name())] = int32(ev.Number()) - } - enumCache.LoadOrStore(name, m) - } - }) - numFiles++ - return true - }) - numFilesCache.Store(protoPkg, numFiles) - - // Check cache again for enum map. - if v, ok := enumCache.Load(s); ok { - return v.(enumsByName) - } - return nil -} - -// walkEnums recursively walks all enums declared in d. -func walkEnums(d interface { - Enums() protoreflect.EnumDescriptors - Messages() protoreflect.MessageDescriptors -}, f func(protoreflect.EnumDescriptor)) { - eds := d.Enums() - for i := eds.Len() - 1; i >= 0; i-- { - f(eds.Get(i)) - } - mds := d.Messages() - for i := mds.Len() - 1; i >= 0; i-- { - walkEnums(mds.Get(i), f) - } -} - -// messageName is the full name of protobuf message. -type messageName = string - -var messageTypeCache sync.Map // map[messageName]reflect.Type - -// RegisterType is called from generated code to register the message Go type -// for a message of the given name. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. -func RegisterType(m Message, s messageName) { - mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) - if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { - panic(err) - } - messageTypeCache.Store(s, reflect.TypeOf(m)) -} - -// RegisterMapType is called from generated code to register the Go map type -// for a protobuf message representing a map entry. -// -// Deprecated: Do not use. -func RegisterMapType(m interface{}, s messageName) { - t := reflect.TypeOf(m) - if t.Kind() != reflect.Map { - panic(fmt.Sprintf("invalid map kind: %v", t)) - } - if _, ok := messageTypeCache.Load(s); ok { - panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) - } - messageTypeCache.Store(s, t) -} - -// MessageType returns the message type for a named message. -// It returns nil if not found. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. -func MessageType(s messageName) reflect.Type { - if v, ok := messageTypeCache.Load(s); ok { - return v.(reflect.Type) - } - - // Derive the message type from the v2 registry. - var t reflect.Type - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { - t = messageGoType(mt) - } - - // If we could not get a concrete type, it is possible that it is a - // pseudo-message for a map entry. - if t == nil { - d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) - if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { - kt := goTypeForField(md.Fields().ByNumber(1)) - vt := goTypeForField(md.Fields().ByNumber(2)) - t = reflect.MapOf(kt, vt) - } - } - - // Locally cache the message type for the given name. 
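// Aside: the v2 registry that the RegisterType/MessageType shims above consult
// can also be queried directly. A minimal sketch resolving a message type by
// its full protobuf name:
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/known/emptypb" // importing a generated package registers its types
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.Empty")
	if err != nil {
		panic(err)
	}
	m := mt.New().Interface() // construct a fresh instance via reflection
	fmt.Printf("%T\n", m)     // *emptypb.Empty
}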
- if t != nil { - v, _ := messageTypeCache.LoadOrStore(s, t) - return v.(reflect.Type) - } - return nil -} - -func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { - switch k := fd.Kind(); k { - case protoreflect.EnumKind: - if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { - return enumGoType(et) - } - return reflect.TypeOf(protoreflect.EnumNumber(0)) - case protoreflect.MessageKind, protoreflect.GroupKind: - if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { - return messageGoType(mt) - } - return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() - default: - return reflect.TypeOf(fd.Default().Interface()) - } -} - -func enumGoType(et protoreflect.EnumType) reflect.Type { - return reflect.TypeOf(et.New(0)) -} - -func messageGoType(mt protoreflect.MessageType) reflect.Type { - return reflect.TypeOf(MessageV1(mt.Zero().Interface())) -} - -// MessageName returns the full protobuf name for the given message type. -// -// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. -func MessageName(m Message) messageName { - if m == nil { - return "" - } - if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { - return m.XXX_MessageName() - } - return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) -} - -// RegisterExtension is called from the generated code to register -// the extension descriptor. -// -// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. -func RegisterExtension(d *ExtensionDesc) { - if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { - panic(err) - } -} - -type extensionsByNumber = map[int32]*ExtensionDesc - -var extensionCache sync.Map // map[messageName]extensionsByNumber - -// RegisteredExtensions returns a map of the registered extensions for the -// provided protobuf message, indexed by the extension field number. -// -// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. -func RegisteredExtensions(m Message) extensionsByNumber { - // Check whether the cache is stale. If the number of extensions for - // the given message differs, then it means that some extensions were - // recently registered upstream that we do not know about. - s := MessageName(m) - v, _ := extensionCache.Load(s) - xs, _ := v.(extensionsByNumber) - if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { - return xs // cache is up-to-date - } - - // Cache is stale, re-compute the extensions map. - xs = make(extensionsByNumber) - protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { - if xd, ok := xt.(*ExtensionDesc); ok { - xs[int32(xt.TypeDescriptor().Number())] = xd - } else { - // TODO: This implies that the protoreflect.ExtensionType is a - // custom type not generated by protoc-gen-go. We could try and - // convert the type to an ExtensionDesc. - } - return true - }) - extensionCache.Store(s, xs) - return xs -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/text_decode.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/text_decode.go deleted file mode 100644 index 47eb3e44501..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/text_decode.go +++ /dev/null @@ -1,801 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package proto - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" - - "google.golang.org/protobuf/encoding/prototext" - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextUnmarshalV2 = false - -// ParseError is returned by UnmarshalText. -type ParseError struct { - Message string - - // Deprecated: Do not use. - Line, Offset int -} - -func (e *ParseError) Error() string { - if wrapTextUnmarshalV2 { - return e.Message - } - if e.Line == 1 { - return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) - } - return fmt.Sprintf("line %d: %v", e.Line, e.Message) -} - -// UnmarshalText parses a proto text formatted string into m. -func UnmarshalText(s string, m Message) error { - if u, ok := m.(encoding.TextUnmarshaler); ok { - return u.UnmarshalText([]byte(s)) - } - - m.Reset() - mi := MessageV2(m) - - if wrapTextUnmarshalV2 { - err := prototext.UnmarshalOptions{ - AllowPartial: true, - }.Unmarshal([]byte(s), mi) - if err != nil { - return &ParseError{Message: err.Error()} - } - return checkRequiredNotSet(mi) - } else { - if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { - return err - } - return checkRequiredNotSet(mi) - } -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { - md := m.Descriptor() - fds := md.Fields() - - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - seen := make(map[protoreflect.FieldNumber]bool) - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - if err := p.unmarshalExtensionOrAny(m, seen); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. 
- name := protoreflect.Name(tok.value) - fd := fds.ByName(name) - switch { - case fd == nil: - gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) - if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { - fd = gd - } - case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: - fd = nil - case fd.IsWeak() && fd.Message().IsPlaceholder(): - fd = nil - } - if fd == nil { - typeName := string(md.FullName()) - if m, ok := m.Interface().(Message); ok { - t := reflect.TypeOf(m) - if t.Kind() == reflect.Ptr { - typeName = t.Elem().String() - } - } - return p.errorf("unknown field name %q in %v", name, typeName) - } - if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) - } - if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { - return p.errorf("non-repeated field %q was repeated", fd.Name()) - } - seen[fd.Number()] = true - - // Consume any colon. - if err := p.checkForColon(fd); err != nil { - return err - } - - // Parse into the field. - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - if v, err = p.unmarshalValue(v, fd); err != nil { - return err - } - m.Set(fd, v) - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - } - return nil -} - -func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { - name, err := p.consumeExtensionOrAnyName() - if err != nil { - return err - } - - // If it contains a slash, it's an Any type URL. - if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { - tok := p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - - mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) - if err != nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) - } - m2 := mt.New() - if err := p.unmarshalMessage(m2, terminator); err != nil { - return err - } - b, err := protoV2.Marshal(m2.Interface()) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) - } - - urlFD := m.Descriptor().Fields().ByName("type_url") - valFD := m.Descriptor().Fields().ByName("value") - if seen[urlFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) - } - if seen[valFD.Number()] { - return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) - } - m.Set(urlFD, protoreflect.ValueOfString(name)) - m.Set(valFD, protoreflect.ValueOfBytes(b)) - seen[urlFD.Number()] = true - seen[valFD.Number()] = true - return nil - } - - xname := protoreflect.FullName(name) - xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) - if xt == nil && isMessageSet(m.Descriptor()) { - xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) - } - if xt == nil { - return p.errorf("unrecognized extension %q", name) - } - fd := xt.TypeDescriptor() - if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { - return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) - } - - if err := p.checkForColon(fd); err != nil { - return err - } - - v := m.Get(fd) - if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { - v = m.Mutable(fd) - } - v, err = p.unmarshalValue(v, fd) - if err != nil { - return err - } - m.Set(fd, v) - return p.consumeOptionalSeparator() -} - -func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch { - case fd.IsList(): - lv := v.List() - var err error - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return v, p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return v, nil - } - - // One value of the repeated field. - p.back() - vv := lv.NewElement() - vv, err = p.unmarshalSingularValue(vv, fd) - if err != nil { - return v, err - } - lv.Append(vv) - return v, nil - case fd.IsMap(): - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - - keyFD := fd.MapKey() - valFD := fd.MapValue() - - mv := v.Map() - kv := keyFD.Default() - vv := mv.NewValue() - for { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == terminator { - break - } - var err error - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return v, err - } - if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - case "value": - if err := p.checkForColon(valFD); err != nil { - return v, err - } - if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { - return v, err - } - if err := p.consumeOptionalSeparator(); err != nil { - return v, err - } - default: - p.back() - return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - mv.Set(kv.MapKey(), vv) - return v, nil - default: - p.back() - return p.unmarshalSingularValue(v, fd) - } -} - -func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { - tok := p.next() - if tok.err != nil { - return v, tok.err - } - if tok.value == "" { - return v, p.errorf("unexpected EOF") - } - - switch fd.Kind() { - case protoreflect.BoolKind: - switch tok.value { - case "true", "1", "t", "True": - return protoreflect.ValueOfBool(true), nil - case "false", "0", "f", "False": - return protoreflect.ValueOfBool(false), nil - } - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. 
- // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil - } - } - case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(x)), nil - } - - // The C++ parser accepts large positive hex numbers that uses - // two's complement arithmetic to represent negative numbers. - // This feature is here for backwards compatibility with C++. - if strings.HasPrefix(tok.value, "0x") { - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil - } - } - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfUint32(uint32(x)), nil - } - case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - return protoreflect.ValueOfUint64(uint64(x)), nil - } - case protoreflect.FloatKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 32); err == nil { - return protoreflect.ValueOfFloat32(float32(x)), nil - } - case protoreflect.DoubleKind: - // Ignore 'f' for compatibility with output generated by C++, - // but don't remove 'f' when the value is "-inf" or "inf". - v := tok.value - if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { - v = v[:len(v)-len("f")] - } - if x, err := strconv.ParseFloat(v, 64); err == nil { - return protoreflect.ValueOfFloat64(float64(x)), nil - } - case protoreflect.StringKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfString(tok.unquoted), nil - } - case protoreflect.BytesKind: - if isQuote(tok.value[0]) { - return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil - } - case protoreflect.EnumKind: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil - } - vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) - if vd != nil { - return protoreflect.ValueOfEnum(vd.Number()), nil - } - case protoreflect.MessageKind, protoreflect.GroupKind: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return v, p.errorf("expected '{' or '<', found %q", tok.value) - } - err := p.unmarshalMessage(v.Message(), terminator) - return v, err - default: - panic(fmt.Sprintf("invalid kind %v", fd.Kind())) - } - return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - if fd.Message() == nil { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -// consumeExtensionOrAnyName consumes an extension name or an Any type URL and -// the following ']'. It returns the name or URL consumed. 
-func (p *textParser) consumeExtensionOrAnyName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in unmarshalMessage to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. 
- cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -var errBadUTF8 = errors.New("proto: bad UTF-8") - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) - s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(rune(i)), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/text_encode.go 
b/ui/wasm/vendor/github.com/golang/protobuf/proto/text_encode.go deleted file mode 100644 index a31134eeb3b..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/text_encode.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - "bytes" - "encoding" - "fmt" - "io" - "math" - "sort" - "strings" - - "google.golang.org/protobuf/encoding/prototext" - "google.golang.org/protobuf/encoding/protowire" - "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -const wrapTextMarshalV2 = false - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line) - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes the proto text format of m to w. -func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error { - b, err := tm.marshal(m) - if len(b) > 0 { - if _, err := w.Write(b); err != nil { - return err - } - } - return err -} - -// Text returns a proto text formatted string of m. -func (tm *TextMarshaler) Text(m Message) string { - b, _ := tm.marshal(m) - return string(b) -} - -func (tm *TextMarshaler) marshal(m Message) ([]byte, error) { - mr := MessageReflect(m) - if mr == nil || !mr.IsValid() { - return []byte(""), nil - } - - if wrapTextMarshalV2 { - if m, ok := m.(encoding.TextMarshaler); ok { - return m.MarshalText() - } - - opts := prototext.MarshalOptions{ - AllowPartial: true, - EmitUnknown: true, - } - if !tm.Compact { - opts.Indent = " " - } - if !tm.ExpandAny { - opts.Resolver = (*protoregistry.Types)(nil) - } - return opts.Marshal(mr.Interface()) - } else { - w := &textWriter{ - compact: tm.Compact, - expandAny: tm.ExpandAny, - complete: true, - } - - if m, ok := m.(encoding.TextMarshaler); ok { - b, err := m.MarshalText() - if err != nil { - return nil, err - } - w.Write(b) - return w.buf, nil - } - - err := w.writeMessage(mr) - return w.buf, err - } -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// MarshalText writes the proto text format of m to w. -func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) } - -// MarshalTextString returns a proto text formatted string of m. -func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) } - -// CompactText writes the compact proto text format of m to w. -func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) } - -// CompactTextString returns a compact proto text formatted string of m. -func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) } - -var ( - newline = []byte("\n") - endBraceNewline = []byte("}\n") - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -// textWriter is an io.Writer that tracks its indentation level. 
-type textWriter struct { - compact bool // same as TextMarshaler.Compact - expandAny bool // same as TextMarshaler.ExpandAny - complete bool // whether the current position is a complete line - indent int // indentation level; never negative - buf []byte -} - -func (w *textWriter) Write(p []byte) (n int, _ error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, p...) - w.complete = false - return len(p), nil - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - w.buf = append(w.buf, ' ') - n++ - } - w.buf = append(w.buf, frag...) - n += len(frag) - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - w.buf = append(w.buf, frag...) - n += len(frag) - if i+1 < len(frags) { - w.buf = append(w.buf, '\n') - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - w.buf = append(w.buf, c) - w.complete = c == '\n' - return nil -} - -func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - - if fd.Kind() != protoreflect.GroupKind { - w.buf = append(w.buf, fd.Name()...) - w.WriteByte(':') - } else { - // Use message type name for group field name. - w.buf = append(w.buf, fd.Message().Name()...) - } - - if !w.compact { - w.WriteByte(' ') - } -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { - md := m.Descriptor() - fdURL := md.Fields().ByName("type_url") - fdVal := md.Fields().ByName("value") - - url := m.Get(fdURL).String() - mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) - if err != nil { - return false, nil - } - - b := m.Get(fdVal).Bytes() - m2 := mt.New() - if err := proto.Unmarshal(b, m2.Interface()); err != nil { - return false, nil - } - w.Write([]byte("[")) - if requiresQuotes(url) { - w.writeQuotedString(url) - } else { - w.Write([]byte(url)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.indent++ - } - if err := w.writeMessage(m2); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.indent-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (w *textWriter) writeMessage(m protoreflect.Message) error { - md := m.Descriptor() - if w.expandAny && md.FullName() == "google.protobuf.Any" { - if canExpand, err := w.writeProto3Any(m); canExpand { - return err - } - } - - fds := md.Fields() - for i := 0; i < fds.Len(); { - fd := fds.Get(i) - if od := fd.ContainingOneof(); od != nil { - fd = m.WhichOneof(od) - i += od.Fields().Len() - } else { - i++ - } - if fd == nil || !m.Has(fd) { - continue - } - - switch { - case fd.IsList(): - lv := m.Get(fd).List() - for j := 0; j < lv.Len(); j++ { - w.writeName(fd) - v := lv.Get(j) - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - } - case fd.IsMap(): - kfd := fd.MapKey() - vfd := fd.MapValue() - mv := m.Get(fd).Map() - - type entry struct{ key, val protoreflect.Value } - var entries []entry - mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { - entries = append(entries, entry{k.Value(), v}) - return true - }) - sort.Slice(entries, func(i, j int) bool { - switch kfd.Kind() { - case protoreflect.BoolKind: - return !entries[i].key.Bool() && entries[j].key.Bool() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: - return entries[i].key.Int() < entries[j].key.Int() - case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: - return entries[i].key.Uint() < entries[j].key.Uint() - case protoreflect.StringKind: - return entries[i].key.String() < entries[j].key.String() - default: - panic("invalid kind") - } - }) - for _, entry := range entries { - w.writeName(fd) - w.WriteByte('<') - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - w.writeName(kfd) - if err := w.writeSingularValue(entry.key, kfd); err != nil { - return err - } - w.WriteByte('\n') - w.writeName(vfd) - if err := w.writeSingularValue(entry.val, vfd); err != nil { - return err - } - w.WriteByte('\n') - w.indent-- - w.WriteByte('>') - w.WriteByte('\n') - } - default: - w.writeName(fd) - if err := w.writeSingularValue(m.Get(fd), fd); err != nil { - return err - } - w.WriteByte('\n') - } - } - - if b := m.GetUnknown(); len(b) > 0 { - w.writeUnknownFields(b) - } - return w.writeExtensions(m) -} - -func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - switch fd.Kind() { - case protoreflect.FloatKind, protoreflect.DoubleKind: - switch vf := v.Float(); { - case math.IsInf(vf, +1): - w.Write(posInf) - case math.IsInf(vf, -1): - w.Write(negInf) - case math.IsNaN(vf): - w.Write(nan) - default: - fmt.Fprint(w, v.Interface()) - } - case 
protoreflect.StringKind: - // NOTE: This does not validate UTF-8 for historical reasons. - w.writeQuotedString(string(v.String())) - case protoreflect.BytesKind: - w.writeQuotedString(string(v.Bytes())) - case protoreflect.MessageKind, protoreflect.GroupKind: - var bra, ket byte = '<', '>' - if fd.Kind() == protoreflect.GroupKind { - bra, ket = '{', '}' - } - w.WriteByte(bra) - if !w.compact { - w.WriteByte('\n') - } - w.indent++ - m := v.Message() - if m2, ok := m.Interface().(encoding.TextMarshaler); ok { - b, err := m2.MarshalText() - if err != nil { - return err - } - w.Write(b) - } else { - w.writeMessage(m) - } - w.indent-- - w.WriteByte(ket) - case protoreflect.EnumKind: - if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { - fmt.Fprint(w, ev.Name()) - } else { - fmt.Fprint(w, v.Enum()) - } - default: - fmt.Fprint(w, v.Interface()) - } - return nil -} - -// writeQuotedString writes a quoted string in the protocol buffer text format. -func (w *textWriter) writeQuotedString(s string) { - w.WriteByte('"') - for i := 0; i < len(s); i++ { - switch c := s[i]; c { - case '\n': - w.buf = append(w.buf, `\n`...) - case '\r': - w.buf = append(w.buf, `\r`...) - case '\t': - w.buf = append(w.buf, `\t`...) - case '"': - w.buf = append(w.buf, `\"`...) - case '\\': - w.buf = append(w.buf, `\\`...) - default: - if isPrint := c >= 0x20 && c < 0x7f; isPrint { - w.buf = append(w.buf, c) - } else { - w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) - } - } - } - w.WriteByte('"') -} - -func (w *textWriter) writeUnknownFields(b []byte) { - if !w.compact { - fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) - } - - for len(b) > 0 { - num, wtyp, n := protowire.ConsumeTag(b) - if n < 0 { - return - } - b = b[n:] - - if wtyp == protowire.EndGroupType { - w.indent-- - w.Write(endBraceNewline) - continue - } - fmt.Fprint(w, num) - if wtyp != protowire.StartGroupType { - w.WriteByte(':') - } - if !w.compact || wtyp == protowire.StartGroupType { - w.WriteByte(' ') - } - switch wtyp { - case protowire.VarintType: - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed32Type: - v, n := protowire.ConsumeFixed32(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.Fixed64Type: - v, n := protowire.ConsumeFixed64(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprint(w, v) - case protowire.BytesType: - v, n := protowire.ConsumeBytes(b) - if n < 0 { - return - } - b = b[n:] - fmt.Fprintf(w, "%q", v) - case protowire.StartGroupType: - w.WriteByte('{') - w.indent++ - default: - fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) - } - w.WriteByte('\n') - } -} - -// writeExtensions writes all the extensions in m. -func (w *textWriter) writeExtensions(m protoreflect.Message) error { - md := m.Descriptor() - if md.ExtensionRanges().Len() == 0 { - return nil - } - - type ext struct { - desc protoreflect.FieldDescriptor - val protoreflect.Value - } - var exts []ext - m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { - if fd.IsExtension() { - exts = append(exts, ext{fd, v}) - } - return true - }) - sort.Slice(exts, func(i, j int) bool { - return exts[i].desc.Number() < exts[j].desc.Number() - }) - - for _, ext := range exts { - // For message set, use the name of the message as the extension name. 
- name := string(ext.desc.FullName()) - if isMessageSet(ext.desc.ContainingMessage()) { - name = strings.TrimSuffix(name, ".message_set_extension") - } - - if !ext.desc.IsList() { - if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { - return err - } - } else { - lv := ext.val.List() - for i := 0; i < lv.Len(); i++ { - if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { - return err - } - } - } - } - return nil -} - -func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { - fmt.Fprintf(w, "[%s]:", name) - if !w.compact { - w.WriteByte(' ') - } - if err := w.writeSingularValue(v, fd); err != nil { - return err - } - w.WriteByte('\n') - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - for i := 0; i < w.indent*2; i++ { - w.buf = append(w.buf, ' ') - } - w.complete = false -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/wire.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/wire.go deleted file mode 100644 index d7c28da5a75..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/wire.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -import ( - protoV2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/runtime/protoiface" -) - -// Size returns the size in bytes of the wire-format encoding of m. -func Size(m Message) int { - if m == nil { - return 0 - } - mi := MessageV2(m) - return protoV2.Size(mi) -} - -// Marshal returns the wire-format encoding of m. -func Marshal(m Message) ([]byte, error) { - b, err := marshalAppend(nil, m, false) - if b == nil { - b = zeroBytes - } - return b, err -} - -var zeroBytes = make([]byte, 0, 0) - -func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { - if m == nil { - return nil, ErrNil - } - mi := MessageV2(m) - nbuf, err := protoV2.MarshalOptions{ - Deterministic: deterministic, - AllowPartial: true, - }.MarshalAppend(buf, mi) - if err != nil { - return buf, err - } - if len(buf) == len(nbuf) { - if !mi.ProtoReflect().IsValid() { - return buf, ErrNil - } - } - return nbuf, checkRequiredNotSet(mi) -} - -// Unmarshal parses a wire-format message in b and places the decoded results in m. -// -// Unmarshal resets m before starting to unmarshal, so any existing data in m is always -// removed. Use UnmarshalMerge to preserve and append to existing data. -func Unmarshal(b []byte, m Message) error { - m.Reset() - return UnmarshalMerge(b, m) -} - -// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. -func UnmarshalMerge(b []byte, m Message) error { - mi := MessageV2(m) - out, err := protoV2.UnmarshalOptions{ - AllowPartial: true, - Merge: true, - }.UnmarshalState(protoiface.UnmarshalInput{ - Buf: b, - Message: mi.ProtoReflect(), - }) - if err != nil { - return err - } - if out.Flags&protoiface.UnmarshalInitialized > 0 { - return nil - } - return checkRequiredNotSet(mi) -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/proto/wrappers.go b/ui/wasm/vendor/github.com/golang/protobuf/proto/wrappers.go deleted file mode 100644 index 398e348599b..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/proto/wrappers.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package proto - -// Bool stores v in a new bool value and returns a pointer to it. -func Bool(v bool) *bool { return &v } - -// Int stores v in a new int32 value and returns a pointer to it. -// -// Deprecated: Use Int32 instead. -func Int(v int) *int32 { return Int32(int32(v)) } - -// Int32 stores v in a new int32 value and returns a pointer to it. -func Int32(v int32) *int32 { return &v } - -// Int64 stores v in a new int64 value and returns a pointer to it. -func Int64(v int64) *int64 { return &v } - -// Uint32 stores v in a new uint32 value and returns a pointer to it. -func Uint32(v uint32) *uint32 { return &v } - -// Uint64 stores v in a new uint64 value and returns a pointer to it. -func Uint64(v uint64) *uint64 { return &v } - -// Float32 stores v in a new float32 value and returns a pointer to it. -func Float32(v float32) *float32 { return &v } - -// Float64 stores v in a new float64 value and returns a pointer to it. -func Float64(v float64) *float64 { return &v } - -// String stores v in a new string value and returns a pointer to it. -func String(v string) *string { return &v } diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 85f9f57365f..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -// -// Deprecated: Call the any.MessageName method instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -// -// Deprecated: Call the anypb.New function instead. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. -// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -// -// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead -// to resolve the message name and create a new instance of it. 
-func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -// -// Deprecated: Call the any.UnmarshalTo method instead. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -// -// Deprecated: Call the any.MessageIs method instead. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -// -// Deprecated: Use the any.UnmarshalNew method instead to unmarshal -// the any message contents into a new instance of the underlying message. 
-type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d33deb..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. 
- -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/doc.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index d3c33259d28..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -// -// Deprecated: Well-known types have specialized functionality directly -// injected into the generated packages for each message type. -// See the deprecation notice for each function for the suggested alternative. -package ptypes diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index b2b55dd851f..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -// -// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -// -// Deprecated: Call the durationpb.New function instead. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee3ef3..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8368a3f70d3..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -// -// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -// -// Deprecated: Call the timestamppb.Now function instead. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -// -// Deprecated: Call the timestamppb.New function instead. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -// -// Deprecated: Call the ts.AsTime method instead, -// followed by a call to the Format method on the time.Time value. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. 
-func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f8076009..00000000000 --- a/ui/wasm/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - 
DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/ui/wasm/vendor/github.com/google/btree/.travis.yml b/ui/wasm/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d9733..00000000000 --- a/ui/wasm/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/ui/wasm/vendor/github.com/google/btree/LICENSE b/ui/wasm/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/ui/wasm/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/ui/wasm/vendor/github.com/google/btree/README.md b/ui/wasm/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4dacd4..00000000000 --- a/ui/wasm/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/ui/wasm/vendor/github.com/google/btree/btree.go b/ui/wasm/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f9bb4..00000000000 --- a/ui/wasm/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. 
For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. -// -// This implementation is designed to be a drop-in replacement to gollrb.LLRB -// trees, (http://github.com/petar/gollrb), an excellent and probably the most -// widely used ordered tree implementation in the Go ecosystem currently. -// Its functions, therefore, exactly mirror those of -// llrb.LLRB where possible. Unlike gollrb, though, we currently don't -// support storing multiple equivalent values. -package btree - -import ( - "fmt" - "io" - "sort" - "strings" - "sync" -) - -// Item represents a single object in the tree. -type Item interface { - // Less tests whether the current item is less than the given argument. - // - // This must provide a strict weak ordering. - // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only - // hold one of either a or b in the tree). - Less(than Item) bool -} - -const ( - DefaultFreeListSize = 32 -) - -var ( - nilItems = make(items, 16) - nilChildren = make(children, 16) -) - -// FreeList represents a free list of btree nodes. By default each -// BTree has its own FreeList, but multiple BTrees can share the same -// FreeList. -// Two Btrees using the same freelist are safe for concurrent write access. -type FreeList struct { - mu sync.Mutex - freelist []*node -} - -// NewFreeList creates a new free list. -// size is the maximum size of the returned free list. -func NewFreeList(size int) *FreeList { - return &FreeList{freelist: make([]*node, 0, size)} -} - -func (f *FreeList) newNode() (n *node) { - f.mu.Lock() - index := len(f.freelist) - 1 - if index < 0 { - f.mu.Unlock() - return new(node) - } - n = f.freelist[index] - f.freelist[index] = nil - f.freelist = f.freelist[:index] - f.mu.Unlock() - return -} - -// freeNode adds the given node to the list, returning true if it was added -// and false if it was discarded. -func (f *FreeList) freeNode(n *node) (out bool) { - f.mu.Lock() - if len(f.freelist) < cap(f.freelist) { - f.freelist = append(f.freelist, n) - out = true - } - f.mu.Unlock() - return -} - -// ItemIterator allows callers of Ascend* to iterate in-order over portions of -// the tree. When this function returns false, iteration will stop and the -// associated Ascend* function will immediately return. -type ItemIterator func(i Item) bool - -// New creates a new B-Tree with the given degree. -// -// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items -// and 2-4 children). -func New(degree int) *BTree { - return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) -} - -// NewWithFreeList creates a new B-Tree that uses the given node free list. 
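// Editorial note, not part of the deleted upstream file: the btree package
// being removed from the vendor tree exposes the small ordered-collection API
// defined in this file. A minimal usage sketch, assuming the upstream module
// github.com/google/btree (the v1, Item-interface API shown here) remains
// resolvable via go.mod:
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2: each node holds 1-3 items and 2-4 children
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i)) // btree.Int implements the Item interface
	}
	fmt.Println(tr.Len())              // 10
	fmt.Println(tr.Get(btree.Int(3)))  // 3
	fmt.Println(tr.Has(btree.Int(42))) // false
}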
-func NewWithFreeList(degree int, f *FreeList) *BTree { - if degree <= 1 { - panic("bad degree") - } - return &BTree{ - degree: degree, - cow: ©OnWriteContext{freelist: f}, - } -} - -// items stores items in a node. -type items []Item - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *items) insertAt(index int, item Item) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = item -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *items) removeAt(index int) Item { - item := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return item -} - -// pop removes and returns the last element in the list. -func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. 
-//
-// It must at all times maintain the invariant that either
-//   * len(children) == 0, len(items) unconstrained
-//   * len(children) == len(items) + 1
-type node struct {
-	items    items
-	children children
-	cow      *copyOnWriteContext
-}
-
-func (n *node) mutableFor(cow *copyOnWriteContext) *node {
-	if n.cow == cow {
-		return n
-	}
-	out := cow.newNode()
-	if cap(out.items) >= len(n.items) {
-		out.items = out.items[:len(n.items)]
-	} else {
-		out.items = make(items, len(n.items), cap(n.items))
-	}
-	copy(out.items, n.items)
-	// Copy children
-	if cap(out.children) >= len(n.children) {
-		out.children = out.children[:len(n.children)]
-	} else {
-		out.children = make(children, len(n.children), cap(n.children))
-	}
-	copy(out.children, n.children)
-	return out
-}
-
-func (n *node) mutableChild(i int) *node {
-	c := n.children[i].mutableFor(n.cow)
-	n.children[i] = c
-	return c
-}
-
-// split splits the given node at the given index. The current node shrinks,
-// and this function returns the item that existed at that index and a new node
-// containing all items/children after it.
-func (n *node) split(i int) (Item, *node) {
-	item := n.items[i]
-	next := n.cow.newNode()
-	next.items = append(next.items, n.items[i+1:]...)
-	n.items.truncate(i)
-	if len(n.children) > 0 {
-		next.children = append(next.children, n.children[i+1:]...)
-		n.children.truncate(i + 1)
-	}
-	return item, next
-}
-
-// maybeSplitChild checks if a child should be split, and if so splits it.
-// Returns whether or not a split occurred.
-func (n *node) maybeSplitChild(i, maxItems int) bool {
-	if len(n.children[i].items) < maxItems {
-		return false
-	}
-	first := n.mutableChild(i)
-	item, second := first.split(maxItems / 2)
-	n.items.insertAt(i, item)
-	n.children.insertAt(i+1, second)
-	return true
-}
-
-// insert inserts an item into the subtree rooted at this node, making sure
-// no nodes in the subtree exceed maxItems items. Should an equivalent item
-// be found/replaced by insert, it will be returned.
-func (n *node) insert(item Item, maxItems int) Item {
-	i, found := n.items.find(item)
-	if found {
-		out := n.items[i]
-		n.items[i] = item
-		return out
-	}
-	if len(n.children) == 0 {
-		n.items.insertAt(i, item)
-		return nil
-	}
-	if n.maybeSplitChild(i, maxItems) {
-		inTree := n.items[i]
-		switch {
-		case item.Less(inTree):
-			// no change, we want first split node
-		case inTree.Less(item):
-			i++ // we want second split node
-		default:
-			out := n.items[i]
-			n.items[i] = item
-			return out
-		}
-	}
-	return n.mutableChild(i).insert(item, maxItems)
-}
-
-// get finds the given key in the subtree and returns it.
-func (n *node) get(key Item) Item {
-	i, found := n.items.find(key)
-	if found {
-		return n.items[i]
-	} else if len(n.children) > 0 {
-		return n.children[i].get(key)
-	}
-	return nil
-}
-
-// min returns the first item in the subtree.
-func min(n *node) Item {
-	if n == nil {
-		return nil
-	}
-	for len(n.children) > 0 {
-		n = n.children[0]
-	}
-	if len(n.items) == 0 {
-		return nil
-	}
-	return n.items[0]
-}
-
-// max returns the last item in the subtree.
-func max(n *node) Item {
-	if n == nil {
-		return nil
-	}
-	for len(n.children) > 0 {
-		n = n.children[len(n.children)-1]
-	}
-	if len(n.items) == 0 {
-		return nil
-	}
-	return n.items[len(n.items)-1]
-}
-
-// toRemove details what item to remove in a node.remove call.
-type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. - return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. 
-func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. -func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. 
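// Editorial note, not part of the deleted upstream file: the remove and
// growChildAndRemove machinery above is what backs the public Delete helpers.
// A short sketch of removing items, under the same github.com/google/btree
// v1 API assumption as the earlier example:
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(4)
	for _, v := range []btree.Int{5, 1, 3, 2, 4} {
		tr.ReplaceOrInsert(v)
	}
	fmt.Println(tr.Delete(btree.Int(3))) // 3 (the removed item; nil if absent)
	for tr.Len() > 0 {
		fmt.Println(tr.DeleteMin()) // 1 2 4 5, smallest first
	}
}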
-func (n *node) print(w io.Writer, level int) {
-	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
-	for _, c := range n.children {
-		c.print(w, level+1)
-	}
-}
-
-// BTree is an implementation of a B-Tree.
-//
-// BTree stores Item instances in an ordered structure, allowing easy insertion,
-// removal, and iteration.
-//
-// Write operations are not safe for concurrent mutation by multiple
-// goroutines, but Read operations are.
-type BTree struct {
-	degree int
-	length int
-	root   *node
-	cow    *copyOnWriteContext
-}
-
-// copyOnWriteContext pointers determine node ownership... a tree with a write
-// context equivalent to a node's write context is allowed to modify that node.
-// A tree whose write context does not match a node's is not allowed to modify
-// it, and must create a new, writable copy (IE: it's a Clone).
-//
-// When doing any write operation, we maintain the invariant that the current
-// node's context is equal to the context of the tree that requested the write.
-// We do this by, before we descend into any node, creating a copy with the
-// correct context if the contexts don't match.
-//
-// Since the node we're currently visiting on any write has the requesting
-// tree's context, that node is modifiable in place. Children of that node may
-// not share context, but before we descend into them, we'll make a mutable
-// copy.
-type copyOnWriteContext struct {
-	freelist *FreeList
-}
-
-// Clone clones the btree, lazily. Clone should not be called concurrently,
-// but the original tree (t) and the new tree (t2) can be used concurrently
-// once the Clone call completes.
-//
-// The internal tree structure of b is marked read-only and shared between t and
-// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
-// whenever one of b's original nodes would have been modified. Read operations
-// should have no performance degradation. Write operations for both t and t2
-// will initially experience minor slow-downs caused by additional allocs and
-// copies due to the aforementioned copy-on-write logic, but should converge to
-// the original performance characteristics of the original tree.
-func (t *BTree) Clone() (t2 *BTree) {
-	// Create two entirely new copy-on-write contexts.
-	// This operation effectively creates three trees:
-	//   the original, shared nodes (old b.cow)
-	//   the new b.cow nodes
-	//   the new out.cow nodes
-	cow1, cow2 := *t.cow, *t.cow
-	out := *t
-	t.cow = &cow1
-	out.cow = &cow2
-	return &out
-}
-
-// maxItems returns the max number of items to allow per node.
-func (t *BTree) maxItems() int {
-	return t.degree*2 - 1
-}
-
-// minItems returns the min number of items to allow per node (ignored for the
-// root node).
-func (t *BTree) minItems() int {
-	return t.degree - 1
-}
-
-func (c *copyOnWriteContext) newNode() (n *node) {
-	n = c.freelist.newNode()
-	n.cow = c
-	return
-}
-
-type freeType int
-
-const (
-	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
-	ftStored                       // node was stored in the freelist for later use
-	ftNotOwned                     // node was ignored by COW, since it's owned by another one
-)
-
-// freeNode frees a node within a given COW context, if it's owned by that
-// context. It returns what happened to the node (see freeType const
-// documentation).
-func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. 
-func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. -func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. 
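// Editorial note, not part of the deleted upstream file: the Ascend/Descend
// family above iterates in key order over half-open ranges, and Clone gives
// cheap copy-on-write snapshots. A hedged sketch, again assuming the
// github.com/google/btree v1 API:
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}
	// Visit [3, 7): prints 3 4 5 6; returning false stops the iteration early.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})
	snap := tr.Clone()                // lazy clone: nodes are shared until written
	tr.ReplaceOrInsert(btree.Int(99)) // copy-on-write kicks in; snap is unaffected
	fmt.Println(tr.Len(), snap.Len()) // 11 10
}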
-func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/LICENSE b/ui/wasm/vendor/github.com/google/go-cmp/LICENSE deleted file mode 100644 index 32017f8fa1d..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2017 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/compare.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/compare.go deleted file mode 100644 index 86d0903b8b5..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/compare.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cmp determines equality of values. -// -// This package is intended to be a more powerful and safer alternative to -// reflect.DeepEqual for comparing whether two values are semantically equal. -// It is intended to only be used in tests, as performance is not a goal and -// it may panic if it cannot compare the values. Its propensity towards -// panicking means that its unsuitable for production environments where a -// spurious panic may be fatal. -// -// The primary features of cmp are: -// -// • When the default behavior of equality does not suit the needs of the test, -// custom equality functions can override the equality operation. -// For example, an equality function may report floats as equal so long as they -// are within some tolerance of each other. -// -// • Types that have an Equal method may use that method to determine equality. -// This allows package authors to determine the equality operation for the types -// that they define. 
-// -// • If no custom equality functions are used and no Equal method is defined, -// equality is determined by recursively comparing the primitive kinds on both -// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported -// fields are not compared by default; they result in panics unless suppressed -// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly -// compared using the Exporter option. -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/diff" - "github.com/google/go-cmp/cmp/internal/flags" - "github.com/google/go-cmp/cmp/internal/function" - "github.com/google/go-cmp/cmp/internal/value" -) - -// Equal reports whether x and y are equal by recursively applying the -// following rules in the given order to x and y and all of their sub-values: -// -// • Let S be the set of all Ignore, Transformer, and Comparer options that -// remain after applying all path filters, value filters, and type filters. -// If at least one Ignore exists in S, then the comparison is ignored. -// If the number of Transformer and Comparer options in S is greater than one, -// then Equal panics because it is ambiguous which option to use. -// If S contains a single Transformer, then use that to transform the current -// values and recursively call Equal on the output values. -// If S contains a single Comparer, then use that to compare the current values. -// Otherwise, evaluation proceeds to the next rule. -// -// • If the values have an Equal method of the form "(T) Equal(T) bool" or -// "(T) Equal(I) bool" where T is assignable to I, then use the result of -// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and -// evaluation proceeds to the next rule. -// -// • Lastly, try to compare x and y based on their basic kinds. -// Simple kinds like booleans, integers, floats, complex numbers, strings, and -// channels are compared using the equivalent of the == operator in Go. -// Functions are only equal if they are both nil, otherwise they are unequal. -// -// Structs are equal if recursively calling Equal on all fields report equal. -// If a struct contains unexported fields, Equal panics unless an Ignore option -// (e.g., cmpopts.IgnoreUnexported) ignores that field or the Exporter option -// explicitly permits comparing the unexported field. -// -// Slices are equal if they are both nil or both non-nil, where recursively -// calling Equal on all non-ignored slice or array elements report equal. -// Empty non-nil slices and nil slices are not equal; to equate empty slices, -// consider using cmpopts.EquateEmpty. -// -// Maps are equal if they are both nil or both non-nil, where recursively -// calling Equal on all non-ignored map entries report equal. -// Map keys are equal according to the == operator. -// To use custom comparisons for map keys, consider using cmpopts.SortMaps. -// Empty non-nil maps and nil maps are not equal; to equate empty maps, -// consider using cmpopts.EquateEmpty. -// -// Pointers and interfaces are equal if they are both nil or both non-nil, -// where they have the same underlying concrete type and recursively -// calling Equal on the underlying values reports equal. -// -// Before recursing into a pointer, slice element, or map, the current path -// is checked to detect whether the address has already been visited. -// If there is a cycle, then the pointed at values are considered equal -// only if both addresses were previously visited in the same path step. 
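// Editorial note, not part of the deleted upstream file: Equal and Diff are
// the two entry points of this package. A minimal test-style sketch, assuming
// github.com/google/go-cmp/cmp is imported from a _test.go file (the point
// type is illustrative only):
package example_test

import (
	"testing"

	"github.com/google/go-cmp/cmp"
)

type point struct{ X, Y int }

func TestPoints(t *testing.T) {
	got := []point{{1, 2}, {3, 4}}
	want := []point{{1, 2}, {3, 5}}
	if !cmp.Equal(got, want) {
		// Diff(want, got) reports removals from want with "-" and additions
		// from got with "+", in pseudo-Go syntax.
		t.Errorf("points mismatch (-want +got):\n%s", cmp.Diff(want, got))
	}
}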
-func Equal(x, y interface{}, opts ...Option) bool { - s := newState(opts) - s.compareAny(rootStep(x, y)) - return s.result.Equal() -} - -// Diff returns a human-readable report of the differences between two values: -// y - x. It returns an empty string if and only if Equal returns true for the -// same input values and options. -// -// The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from x, -// a "+" prefix to indicates an element added from y, and the lack of a prefix -// indicates an element common to both x and y. If possible, the output -// uses fmt.Stringer.String or error.Error methods to produce more humanly -// readable outputs. In such cases, the string is prefixed with either an -// 's' or 'e' character, respectively, to indicate that the method was called. -// -// Do not depend on this output being stable. If you need the ability to -// programmatically interpret the difference, consider using a custom Reporter. -func Diff(x, y interface{}, opts ...Option) string { - s := newState(opts) - - // Optimization: If there are no other reporters, we can optimize for the - // common case where the result is equal (and thus no reported difference). - // This avoids the expensive construction of a difference tree. - if len(s.reporters) == 0 { - s.compareAny(rootStep(x, y)) - if s.result.Equal() { - return "" - } - s.result = diff.Result{} // Reset results - } - - r := new(defaultReporter) - s.reporters = append(s.reporters, reporter{r}) - s.compareAny(rootStep(x, y)) - d := r.String() - if (d == "") != s.result.Equal() { - panic("inconsistent difference and equality results") - } - return d -} - -// rootStep constructs the first path step. If x and y have differing types, -// then they are stored within an empty interface type. -func rootStep(x, y interface{}) PathStep { - vx := reflect.ValueOf(x) - vy := reflect.ValueOf(y) - - // If the inputs are different types, auto-wrap them in an empty interface - // so that they have the same parent type. - var t reflect.Type - if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() { - t = reflect.TypeOf((*interface{})(nil)).Elem() - if vx.IsValid() { - vvx := reflect.New(t).Elem() - vvx.Set(vx) - vx = vvx - } - if vy.IsValid() { - vvy := reflect.New(t).Elem() - vvy.Set(vy) - vy = vvy - } - } else { - t = vx.Type() - } - - return &pathStep{t, vx, vy} -} - -type state struct { - // These fields represent the "comparison state". - // Calling statelessCompare must not result in observable changes to these. - result diff.Result // The current result of comparison - curPath Path // The current path in the value tree - curPtrs pointerPath // The current set of visited pointers - reporters []reporter // Optional reporters - - // recChecker checks for infinite cycles applying the same set of - // transformers upon the output of itself. - recChecker recChecker - - // dynChecker triggers pseudo-random checks for option correctness. - // It is safe for statelessCompare to mutate this value. - dynChecker dynChecker - - // These fields, once set by processOption, will not change. - exporters []exporter // List of exporters for structs with unexported fields - opts Options // List of all fundamental and filter options -} - -func newState(opts []Option) *state { - // Always ensure a validator option exists to validate the inputs. 
- s := &state{opts: Options{validator{}}} - s.curPtrs.Init() - s.processOption(Options(opts)) - return s -} - -func (s *state) processOption(opt Option) { - switch opt := opt.(type) { - case nil: - case Options: - for _, o := range opt { - s.processOption(o) - } - case coreOption: - type filtered interface { - isFiltered() bool - } - if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() { - panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt)) - } - s.opts = append(s.opts, opt) - case exporter: - s.exporters = append(s.exporters, opt) - case reporter: - s.reporters = append(s.reporters, opt) - default: - panic(fmt.Sprintf("unknown option %T", opt)) - } -} - -// statelessCompare compares two values and returns the result. -// This function is stateless in that it does not alter the current result, -// or output to any registered reporters. -func (s *state) statelessCompare(step PathStep) diff.Result { - // We do not save and restore curPath and curPtrs because all of the - // compareX methods should properly push and pop from them. - // It is an implementation bug if the contents of the paths differ from - // when calling this function to when returning from it. - - oldResult, oldReporters := s.result, s.reporters - s.result = diff.Result{} // Reset result - s.reporters = nil // Remove reporters to avoid spurious printouts - s.compareAny(step) - res := s.result - s.result, s.reporters = oldResult, oldReporters - return res -} - -func (s *state) compareAny(step PathStep) { - // Update the path stack. - s.curPath.push(step) - defer s.curPath.pop() - for _, r := range s.reporters { - r.PushStep(step) - defer r.PopStep() - } - s.recChecker.Check(s.curPath) - - // Cycle-detection for slice elements (see NOTE in compareSlice). - t := step.Type() - vx, vy := step.Values() - if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() { - px, py := vx.Addr(), vy.Addr() - if eq, visited := s.curPtrs.Push(px, py); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(px, py) - } - - // Rule 1: Check whether an option applies on this node in the value tree. - if s.tryOptions(t, vx, vy) { - return - } - - // Rule 2: Check whether the type has a valid Equal method. - if s.tryMethod(t, vx, vy) { - return - } - - // Rule 3: Compare based on the underlying kind. - switch t.Kind() { - case reflect.Bool: - s.report(vx.Bool() == vy.Bool(), 0) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s.report(vx.Int() == vy.Int(), 0) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - s.report(vx.Uint() == vy.Uint(), 0) - case reflect.Float32, reflect.Float64: - s.report(vx.Float() == vy.Float(), 0) - case reflect.Complex64, reflect.Complex128: - s.report(vx.Complex() == vy.Complex(), 0) - case reflect.String: - s.report(vx.String() == vy.String(), 0) - case reflect.Chan, reflect.UnsafePointer: - s.report(vx.Pointer() == vy.Pointer(), 0) - case reflect.Func: - s.report(vx.IsNil() && vy.IsNil(), 0) - case reflect.Struct: - s.compareStruct(t, vx, vy) - case reflect.Slice, reflect.Array: - s.compareSlice(t, vx, vy) - case reflect.Map: - s.compareMap(t, vx, vy) - case reflect.Ptr: - s.comparePtr(t, vx, vy) - case reflect.Interface: - s.compareInterface(t, vx, vy) - default: - panic(fmt.Sprintf("%v kind not handled", t.Kind())) - } -} - -func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { - // Evaluate all filters and apply the remaining options. 
- if opt := s.opts.filter(s, t, vx, vy); opt != nil { - opt.apply(s, vx, vy) - return true - } - return false -} - -func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { - // Check if this type even has an Equal method. - m, ok := t.MethodByName("Equal") - if !ok || !function.IsType(m.Type, function.EqualAssignable) { - return false - } - - eq := s.callTTBFunc(m.Func, vx, vy) - s.report(eq, reportByMethod) - return true -} - -func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { - v = sanitizeValue(v, f.Type().In(0)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{v})[0] - } - - // Run the function twice and ensure that we get the same results back. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, v) - got := <-c - want := f.Call([]reflect.Value{v})[0] - if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { - // To avoid false-positives with non-reflexive equality operations, - // we sanity check whether a value is equal to itself. - if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() { - return want - } - panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f))) - } - return want -} - -func (s *state) callTTBFunc(f, x, y reflect.Value) bool { - x = sanitizeValue(x, f.Type().In(0)) - y = sanitizeValue(y, f.Type().In(1)) - if !s.dynChecker.Next() { - return f.Call([]reflect.Value{x, y})[0].Bool() - } - - // Swapping the input arguments is sufficient to check that - // f is symmetric and deterministic. - // We run in goroutines so that the race detector (if enabled) can detect - // unsafe mutations to the input. - c := make(chan reflect.Value) - go detectRaces(c, f, y, x) - got := <-c - want := f.Call([]reflect.Value{x, y})[0].Bool() - if !got.IsValid() || got.Bool() != want { - panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f))) - } - return want -} - -func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) { - var ret reflect.Value - defer func() { - recover() // Ignore panics, let the other call to f panic instead - c <- ret - }() - ret = f.Call(vs)[0] -} - -// sanitizeValue converts nil interfaces of type T to those of type R, -// assuming that T is assignable to R. -// Otherwise, it returns the input value as is. -func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value { - // TODO(≥go1.10): Workaround for reflect bug (https://golang.org/issue/22143). - if !flags.AtLeastGo110 { - if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t { - return reflect.New(t).Elem() - } - } - return v -} - -func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) { - var addr bool - var vax, vay reflect.Value // Addressable versions of vx and vy - - var mayForce, mayForceInit bool - step := StructField{&structField{}} - for i := 0; i < t.NumField(); i++ { - step.typ = t.Field(i).Type - step.vx = vx.Field(i) - step.vy = vy.Field(i) - step.name = t.Field(i).Name - step.idx = i - step.unexported = !isExported(step.name) - if step.unexported { - if step.name == "_" { - continue - } - // Defer checking of unexported fields until later to give an - // Ignore a chance to ignore the field. - if !vax.IsValid() || !vay.IsValid() { - // For retrieveUnexportedField to work, the parent struct must - // be addressable. Create a new copy of the values if - // necessary to make them addressable. 
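// Editorial note, not part of the deleted upstream file: as the surrounding
// compareStruct code shows, unexported fields are only compared when an
// Exporter permits it; otherwise Equal panics unless the field is ignored.
// A sketch of the usual escape hatch (assumes the companion package
// github.com/google/go-cmp/cmp/cmpopts; the counter type is illustrative only):
//
//	type counter struct {
//		Name string
//		hits int // unexported: cmp.Equal would panic without an option
//	}
//
//	a := counter{Name: "n", hits: 1}
//	b := counter{Name: "n", hits: 2}
//	cmp.Equal(a, b, cmpopts.IgnoreUnexported(counter{})) // true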
- addr = vx.CanAddr() || vy.CanAddr()
- vax = makeAddressable(vx)
- vay = makeAddressable(vy)
- }
- if !mayForceInit {
- for _, xf := range s.exporters {
- mayForce = mayForce || xf(t)
- }
- mayForceInit = true
- }
- step.mayForce = mayForce
- step.paddr = addr
- step.pvx = vax
- step.pvy = vay
- step.field = t.Field(i)
- }
- s.compareAny(step)
- }
-}
-
-func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
- isSlice := t.Kind() == reflect.Slice
- if isSlice && (vx.IsNil() || vy.IsNil()) {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
-
- // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
- // since a slice represents a list of pointers, rather than a single pointer.
- // The pointer checking logic must be handled on a per-element basis
- // in compareAny.
- //
- // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
- // pointer P, a length N, and a capacity C. Supposing each slice element has
- // a memory size of M, then the slice is equivalent to the list of pointers:
- // [P+i*M for i in range(N)]
- //
- // For example, v[:0] and v[:1] are slices with the same starting pointer,
- // but they are clearly different values. Using the slice pointer alone
- // violates the assumption that equal pointers imply equal values.
-
- step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
- withIndexes := func(ix, iy int) SliceIndex {
- if ix >= 0 {
- step.vx, step.xkey = vx.Index(ix), ix
- } else {
- step.vx, step.xkey = reflect.Value{}, -1
- }
- if iy >= 0 {
- step.vy, step.ykey = vy.Index(iy), iy
- } else {
- step.vy, step.ykey = reflect.Value{}, -1
- }
- return step
- }
-
- // Ignore options are able to ignore missing elements in a slice.
- // However, detecting these reliably requires an optimal differencing
- // algorithm, which diff.Difference is not.
- //
- // Instead, we first iterate through both slices to detect which elements
- // would be ignored if standing alone. The indexes of non-discarded elements
- // are stored in a separate slice, on which diffing is then performed.
- var indexesX, indexesY []int
- var ignoredX, ignoredY []bool
- for ix := 0; ix < vx.Len(); ix++ {
- ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
- if !ignored {
- indexesX = append(indexesX, ix)
- }
- ignoredX = append(ignoredX, ignored)
- }
- for iy := 0; iy < vy.Len(); iy++ {
- ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
- if !ignored {
- indexesY = append(indexesY, iy)
- }
- ignoredY = append(ignoredY, ignored)
- }
-
- // Compute an edit-script for slices vx and vy (excluding ignored elements).
- edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
- return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
- })
-
- // Replay the ignore-scripts and the edit-script.
- var ix, iy int
- for ix < vx.Len() || iy < vy.Len() {
- var e diff.EditType
- switch {
- case ix < len(ignoredX) && ignoredX[ix]:
- e = diff.UniqueX
- case iy < len(ignoredY) && ignoredY[iy]:
- e = diff.UniqueY
- default:
- e, edits = edits[0], edits[1:]
- }
- switch e {
- case diff.UniqueX:
- s.compareAny(withIndexes(ix, -1))
- ix++
- case diff.UniqueY:
- s.compareAny(withIndexes(-1, iy))
- iy++
- default:
- s.compareAny(withIndexes(ix, iy))
- ix++
- iy++
- }
- }
-}
-
-func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
-
- // Cycle-detection for maps. 
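
The slice-header NOTE above is easy to confirm in a few lines; a small sketch using only the standard library and the public cmp API:

	package main

	import (
		"fmt"
		"reflect"

		"github.com/google/go-cmp/cmp"
	)

	func main() {
		v := []int{1, 2, 3}
		s0, s1 := v[:0], v[:1]
		// Identical starting pointer...
		fmt.Println(reflect.ValueOf(s0).Pointer() == reflect.ValueOf(s1).Pointer()) // true
		// ...yet clearly different values, hence the per-element cycle checks.
		fmt.Println(cmp.Equal(s0, s1)) // false: lengths differ
	}
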
- if eq, visited := s.curPtrs.Push(vx, vy); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(vx, vy) - - // We combine and sort the two map keys so that we can perform the - // comparisons in a deterministic order. - step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}} - for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) { - step.vx = vx.MapIndex(k) - step.vy = vy.MapIndex(k) - step.key = k - if !step.vx.IsValid() && !step.vy.IsValid() { - // It is possible for both vx and vy to be invalid if the - // key contained a NaN value in it. - // - // Even with the ability to retrieve NaN keys in Go 1.12, - // there still isn't a sensible way to compare the values since - // a NaN key may map to multiple unordered values. - // The most reasonable way to compare NaNs would be to compare the - // set of values. However, this is impossible to do efficiently - // since set equality is provably an O(n^2) operation given only - // an Equal function. If we had a Less function or Hash function, - // this could be done in O(n*log(n)) or O(n), respectively. - // - // Rather than adding complex logic to deal with NaNs, make it - // the user's responsibility to compare such obscure maps. - const help = "consider providing a Comparer to compare the map" - panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help)) - } - s.compareAny(step) - } -} - -func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - - // Cycle-detection for pointers. - if eq, visited := s.curPtrs.Push(vx, vy); visited { - s.report(eq, reportByCycle) - return - } - defer s.curPtrs.Pop(vx, vy) - - vx, vy = vx.Elem(), vy.Elem() - s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}}) -} - -func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) { - if vx.IsNil() || vy.IsNil() { - s.report(vx.IsNil() && vy.IsNil(), 0) - return - } - vx, vy = vx.Elem(), vy.Elem() - if vx.Type() != vy.Type() { - s.report(false, 0) - return - } - s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}}) -} - -func (s *state) report(eq bool, rf resultFlags) { - if rf&reportByIgnore == 0 { - if eq { - s.result.NumSame++ - rf |= reportEqual - } else { - s.result.NumDiff++ - rf |= reportUnequal - } - } - for _, r := range s.reporters { - r.Report(Result{flags: rf}) - } -} - -// recChecker tracks the state needed to periodically perform checks that -// user provided transformers are not stuck in an infinitely recursive cycle. -type recChecker struct{ next int } - -// Check scans the Path for any recursive transformers and panics when any -// recursive transformers are detected. Note that the presence of a -// recursive Transformer does not necessarily imply an infinite cycle. -// As such, this check only activates after some minimal number of path steps. -func (rc *recChecker) Check(p Path) { - const minLen = 1 << 16 - if rc.next == 0 { - rc.next = minLen - } - if len(p) < rc.next { - return - } - rc.next <<= 1 - - // Check whether the same transformer has appeared at least twice. 
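
The check below fires for transformers that can feed their own output back into themselves. A hedged sketch of the remedy named in the panic message, mirroring the documented cmpopts pattern:

	package main

	import (
		"fmt"
		"strings"

		"github.com/google/go-cmp/cmp"
		"github.com/google/go-cmp/cmp/cmpopts"
	)

	func main() {
		// Splitting a string yields more strings, so the transformer could
		// be applied to its own output forever; AcyclicTransformer installs
		// a filter that breaks exactly the recursion detected below.
		opt := cmpopts.AcyclicTransformer("SplitLines",
			func(s string) []string { return strings.Split(s, "\n") })
		fmt.Println(cmp.Equal("a\nb", "a\nb", opt)) // true
	}
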
- var ss []string
- m := map[Option]int{}
- for _, ps := range p {
- if t, ok := ps.(Transform); ok {
- t := t.Option()
- if m[t] == 1 { // Transformer was used exactly once before
- tf := t.(*transformer).fnc.Type()
- ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
- }
- m[t]++
- }
- }
- if len(ss) > 0 {
- const warning = "recursive set of Transformers detected"
- const help = "consider using cmpopts.AcyclicTransformer"
- set := strings.Join(ss, "\n\t")
- panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
- }
-}
-
-// dynChecker tracks the state needed to periodically perform checks that
-// user provided functions are symmetric and deterministic.
-// The zero value is safe for immediate use.
-type dynChecker struct{ curr, next int }
-
-// Next increments the state and reports whether a check should be performed.
-//
-// Checks occur every Nth function call, where N is a triangular number:
-// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
-// See https://en.wikipedia.org/wiki/Triangular_number
-//
-// This sequence ensures that the cost of checks drops significantly as
-// the number of function calls grows larger.
-func (dc *dynChecker) Next() bool {
- ok := dc.curr == dc.next
- if ok {
- dc.curr = 0
- dc.next++
- }
- dc.curr++
- return ok
-}
-
-// makeAddressable returns a value that is always addressable.
-// It returns the input verbatim if it is already addressable,
-// otherwise it creates a new value and returns an addressable copy.
-func makeAddressable(v reflect.Value) reflect.Value {
- if v.CanAddr() {
- return v
- }
- vc := reflect.New(v.Type()).Elem()
- vc.Set(v)
- return vc
-}
diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_panic.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_panic.go
deleted file mode 100644
index 5ff0b4218c6..00000000000
--- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_panic.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build purego
-
-package cmp
-
-import "reflect"
-
-const supportExporters = false
-
-func retrieveUnexportedField(reflect.Value, reflect.StructField, bool) reflect.Value {
- panic("no support for forcibly accessing unexported fields")
-}
diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
deleted file mode 100644
index 21eb54858e0..00000000000
--- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/export_unsafe.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !purego
-
-package cmp
-
-import (
- "reflect"
- "unsafe"
-)
-
-const supportExporters = true
-
-// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
-// a struct such that the value has read-write permissions.
-//
-// The parent struct, v, must be addressable, while f must be a StructField
-// describing the field to retrieve. If addr is false,
-// then the returned value will be shallow copied to be non-addressable. 
-func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value { - ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem() - if !addr { - // A field is addressable if and only if the struct is addressable. - // If the original parent value was not addressable, shallow copy the - // value to make it non-addressable to avoid leaking an implementation - // detail of how forcibly exporting a field works. - if ve.Kind() == reflect.Interface && ve.IsNil() { - return reflect.Zero(f.Type) - } - return reflect.ValueOf(ve.Interface()).Convert(f.Type) - } - return ve -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go deleted file mode 100644 index 1daaaacc5ee..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !cmp_debug - -package diff - -var debug debugger - -type debugger struct{} - -func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { - return f -} -func (debugger) Update() {} -func (debugger) Finish() {} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go deleted file mode 100644 index 4b91dbcacae..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build cmp_debug - -package diff - -import ( - "fmt" - "strings" - "sync" - "time" -) - -// The algorithm can be seen running in real-time by enabling debugging: -// go test -tags=cmp_debug -v -// -// Example output: -// === RUN TestDifference/#34 -// ┌───────────────────────────────┐ -// │ \ · · · · · · · · · · · · · · │ -// │ · # · · · · · · · · · · · · · │ -// │ · \ · · · · · · · · · · · · · │ -// │ · · \ · · · · · · · · · · · · │ -// │ · · · X # · · · · · · · · · · │ -// │ · · · # \ · · · · · · · · · · │ -// │ · · · · · # # · · · · · · · · │ -// │ · · · · · # \ · · · · · · · · │ -// │ · · · · · · · \ · · · · · · · │ -// │ · · · · · · · · \ · · · · · · │ -// │ · · · · · · · · · \ · · · · · │ -// │ · · · · · · · · · · \ · · # · │ -// │ · · · · · · · · · · · \ # # · │ -// │ · · · · · · · · · · · # # # · │ -// │ · · · · · · · · · · # # # # · │ -// │ · · · · · · · · · # # # # # · │ -// │ · · · · · · · · · · · · · · \ │ -// └───────────────────────────────┘ -// [.Y..M.XY......YXYXY.|] -// -// The grid represents the edit-graph where the horizontal axis represents -// list X and the vertical axis represents list Y. The start of the two lists -// is the top-left, while the ends are the bottom-right. The '·' represents -// an unexplored node in the graph. The '\' indicates that the two symbols -// from list X and Y are equal. The 'X' indicates that two symbols are similar -// (but not exactly equal) to each other. The '#' indicates that the two symbols -// are different (and not similar). The algorithm traverses this graph trying to -// make the paths starting in the top-left and the bottom-right connect. 
-// -// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents -// the currently established path from the forward and reverse searches, -// separated by a '|' character. - -const ( - updateDelay = 100 * time.Millisecond - finishDelay = 500 * time.Millisecond - ansiTerminal = true // ANSI escape codes used to move terminal cursor -) - -var debug debugger - -type debugger struct { - sync.Mutex - p1, p2 EditScript - fwdPath, revPath *EditScript - grid []byte - lines int -} - -func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { - dbg.Lock() - dbg.fwdPath, dbg.revPath = p1, p2 - top := "┌─" + strings.Repeat("──", nx) + "┐\n" - row := "│ " + strings.Repeat("· ", nx) + "│\n" - btm := "└─" + strings.Repeat("──", nx) + "┘\n" - dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) - dbg.lines = strings.Count(dbg.String(), "\n") - fmt.Print(dbg) - - // Wrap the EqualFunc so that we can intercept each result. - return func(ix, iy int) (r Result) { - cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] - for i := range cell { - cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot - } - switch r = f(ix, iy); { - case r.Equal(): - cell[0] = '\\' - case r.Similar(): - cell[0] = 'X' - default: - cell[0] = '#' - } - return - } -} - -func (dbg *debugger) Update() { - dbg.print(updateDelay) -} - -func (dbg *debugger) Finish() { - dbg.print(finishDelay) - dbg.Unlock() -} - -func (dbg *debugger) String() string { - dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] - for i := len(*dbg.revPath) - 1; i >= 0; i-- { - dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) - } - return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) -} - -func (dbg *debugger) print(d time.Duration) { - if ansiTerminal { - fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor - } - fmt.Print(dbg) - time.Sleep(d) -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go deleted file mode 100644 index bc196b16cfa..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package diff implements an algorithm for producing edit-scripts. -// The edit-script is a sequence of operations needed to transform one list -// of symbols into another (or vice-versa). The edits allowed are insertions, -// deletions, and modifications. The summation of all edits is called the -// Levenshtein distance as this problem is well-known in computer science. -// -// This package prioritizes performance over accuracy. That is, the run time -// is more important than obtaining a minimal Levenshtein distance. -package diff - -import ( - "math/rand" - "time" - - "github.com/google/go-cmp/cmp/internal/flags" -) - -// EditType represents a single operation within an edit-script. -type EditType uint8 - -const ( - // Identity indicates that a symbol pair is identical in both list X and Y. - Identity EditType = iota - // UniqueX indicates that a symbol only exists in X and not Y. - UniqueX - // UniqueY indicates that a symbol only exists in Y and not X. - UniqueY - // Modified indicates that a symbol pair is a modification of each other. - Modified -) - -// EditScript represents the series of differences between two lists. 
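
The internal diff package cannot be imported from outside go-cmp, so the following is only an illustration of the encoding defined below, for X = [a b c] and Y = [a x c d]:

	es := EditScript{Identity, Modified, Identity, UniqueY}
	// es.String() == ".M.Y"   keep a, modify b into x, keep c, insert d
	// es.Dist()   == 2        len(es) minus the two Identity edits
	// es.LenX()   == 3        len(es) minus the one UniqueY edit
	// es.LenY()   == 4        len(es) minus zero UniqueX edits
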
-type EditScript []EditType - -// String returns a human-readable string representing the edit-script where -// Identity, UniqueX, UniqueY, and Modified are represented by the -// '.', 'X', 'Y', and 'M' characters, respectively. -func (es EditScript) String() string { - b := make([]byte, len(es)) - for i, e := range es { - switch e { - case Identity: - b[i] = '.' - case UniqueX: - b[i] = 'X' - case UniqueY: - b[i] = 'Y' - case Modified: - b[i] = 'M' - default: - panic("invalid edit-type") - } - } - return string(b) -} - -// stats returns a histogram of the number of each type of edit operation. -func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { - for _, e := range es { - switch e { - case Identity: - s.NI++ - case UniqueX: - s.NX++ - case UniqueY: - s.NY++ - case Modified: - s.NM++ - default: - panic("invalid edit-type") - } - } - return -} - -// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if -// lists X and Y are equal. -func (es EditScript) Dist() int { return len(es) - es.stats().NI } - -// LenX is the length of the X list. -func (es EditScript) LenX() int { return len(es) - es.stats().NY } - -// LenY is the length of the Y list. -func (es EditScript) LenY() int { return len(es) - es.stats().NX } - -// EqualFunc reports whether the symbols at indexes ix and iy are equal. -// When called by Difference, the index is guaranteed to be within nx and ny. -type EqualFunc func(ix int, iy int) Result - -// Result is the result of comparison. -// NumSame is the number of sub-elements that are equal. -// NumDiff is the number of sub-elements that are not equal. -type Result struct{ NumSame, NumDiff int } - -// BoolResult returns a Result that is either Equal or not Equal. -func BoolResult(b bool) Result { - if b { - return Result{NumSame: 1} // Equal, Similar - } else { - return Result{NumDiff: 2} // Not Equal, not Similar - } -} - -// Equal indicates whether the symbols are equal. Two symbols are equal -// if and only if NumDiff == 0. If Equal, then they are also Similar. -func (r Result) Equal() bool { return r.NumDiff == 0 } - -// Similar indicates whether two symbols are similar and may be represented -// by using the Modified type. As a special case, we consider binary comparisons -// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. -// -// The exact ratio of NumSame to NumDiff to determine similarity may change. -func (r Result) Similar() bool { - // Use NumSame+1 to offset NumSame so that binary comparisons are similar. - return r.NumSame+1 >= r.NumDiff -} - -var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 - -// Difference reports whether two lists of lengths nx and ny are equal -// given the definition of equality provided as f. -// -// This function returns an edit-script, which is a sequence of operations -// needed to convert one list into the other. The following invariants for -// the edit-script are maintained: -// • eq == (es.Dist()==0) -// • nx == es.LenX() -// • ny == es.LenY() -// -// This algorithm is not guaranteed to be an optimal solution (i.e., one that -// produces an edit-script with a minimal Levenshtein distance). This algorithm -// favors performance over optimality. The exact output is not guaranteed to -// be stable and may change over time. -func Difference(nx, ny int, f EqualFunc) (es EditScript) { - // This algorithm is based on traversing what is known as an "edit-graph". - // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" - // by Eugene W. Myers. 
Since D can be as large as N itself, this is
- // effectively O(N^2). Unlike the algorithm from that paper, we are not
- // interested in the optimal path, but in at least some "decent" path.
- //
- // For example, let X and Y be lists of symbols:
- // X = [A B C A B B A]
- // Y = [C B A B A C]
- //
- // The edit-graph can be drawn as the following:
- // A B C A B B A
- // ┌─────────────┐
- // C │_|_|\|_|_|_|_│ 0
- // B │_|\|_|_|\|\|_│ 1
- // A │\|_|_|\|_|_|\│ 2
- // B │_|\|_|_|\|\|_│ 3
- // A │\|_|_|\|_|_|\│ 4
- // C │ | |\| | | | │ 5
- // └─────────────┘ 6
- // 0 1 2 3 4 5 6 7
- //
- // List X is written along the horizontal axis, while list Y is written
- // along the vertical axis. At any point on this grid, if the symbol in
- // list X matches the corresponding symbol in list Y, then a '\' is drawn.
- // The goal of any minimal edit-script algorithm is to find a path from the
- // top-left corner to the bottom-right corner, while traveling through the
- // fewest horizontal or vertical edges.
- // A horizontal edge is equivalent to inserting a symbol from list X.
- // A vertical edge is equivalent to inserting a symbol from list Y.
- // A diagonal edge is equivalent to a matching symbol between both X and Y.
-
- // Invariants:
- // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
- // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
- //
- // In general:
- // • fwdFrontier.X < revFrontier.X
- // • fwdFrontier.Y < revFrontier.Y
- // Unless it is time for the algorithm to terminate.
- fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
- revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
- fwdFrontier := fwdPath.point // Forward search frontier
- revFrontier := revPath.point // Reverse search frontier
-
- // Search budget bounds the cost of searching for better paths.
- // The longest sequence of non-matching symbols that can be tolerated is
- // approximately the square-root of the search budget.
- searchBudget := 4 * (nx + ny) // O(n)
-
- // Running the tests with the "cmp_debug" build tag prints a visualization
- // of the algorithm running in real-time. This is educational for
- // understanding how the algorithm works. See debug_enable.go.
- f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
-
- // The algorithm below is a greedy, meet-in-the-middle algorithm for
- // computing sub-optimal edit-scripts between two lists.
- //
- // The algorithm is approximately as follows:
- // • Searching for differences switches back-and-forth between
- // a search that starts at the beginning (the top-left corner), and
- // a search that starts at the end (the bottom-right corner). The goal of
- // the search is to connect with the search from the opposite corner.
- // • As we search, we build a path in a greedy manner, where the first
- // match seen is added to the path (this is sub-optimal, but provides a
- // decent result in practice). When matches are found, we try the next pair
- // of symbols in the lists and follow all matches as far as possible.
- // • When searching for matches, we search along a diagonal going through
- // the "frontier" point. If no matches are found, we advance the
- // frontier towards the opposite corner.
- // • This algorithm terminates when either the X coordinates or the
- // Y coordinates of the forward and reverse frontier points intersect.
-
- // This algorithm is correct even if searching only in the forward direction
- // or in the reverse direction. 
We do both because it is commonly observed - // that two lists commonly differ because elements were added to the front - // or end of the other list. - // - // Non-deterministically start with either the forward or reverse direction - // to introduce some deliberate instability so that we have the flexibility - // to change this algorithm in the future. - if flags.Deterministic || randBool { - goto forwardSearch - } else { - goto reverseSearch - } - -forwardSearch: - { - // Forward search from the beginning. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - goto finishSearch - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{fwdFrontier.X + z, fwdFrontier.Y - z} - switch { - case p.X >= revPath.X || p.Y < fwdPath.Y: - stop1 = true // Hit top-right corner - case p.Y >= revPath.Y || p.X < fwdPath.X: - stop2 = true // Hit bottom-left corner - case f(p.X, p.Y).Equal(): - // Match found, so connect the path to this point. - fwdPath.connect(p, f) - fwdPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(fwdPath.X, fwdPath.Y).Equal() { - break - } - fwdPath.append(Identity) - } - fwdFrontier = fwdPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards reverse point. - if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { - fwdFrontier.X++ - } else { - fwdFrontier.Y++ - } - goto reverseSearch - } - -reverseSearch: - { - // Reverse search from the end. - if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - goto finishSearch - } - for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { - // Search in a diagonal pattern for a match. - z := zigzag(i) - p := point{revFrontier.X - z, revFrontier.Y + z} - switch { - case fwdPath.X >= p.X || revPath.Y < p.Y: - stop1 = true // Hit bottom-left corner - case fwdPath.Y >= p.Y || revPath.X < p.X: - stop2 = true // Hit top-right corner - case f(p.X-1, p.Y-1).Equal(): - // Match found, so connect the path to this point. - revPath.connect(p, f) - revPath.append(Identity) - // Follow sequence of matches as far as possible. - for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { - if !f(revPath.X-1, revPath.Y-1).Equal() { - break - } - revPath.append(Identity) - } - revFrontier = revPath.point - stop1, stop2 = true, true - default: - searchBudget-- // Match not found - } - debug.Update() - } - // Advance the frontier towards forward point. - if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { - revFrontier.X-- - } else { - revFrontier.Y-- - } - goto forwardSearch - } - -finishSearch: - // Join the forward and reverse paths and then append the reverse path. - fwdPath.connect(revPath.point, f) - for i := len(revPath.es) - 1; i >= 0; i-- { - t := revPath.es[i] - revPath.es = revPath.es[:i] - fwdPath.append(t) - } - debug.Finish() - return fwdPath.es -} - -type path struct { - dir int // +1 if forward, -1 if reverse - point // Leading point of the EditScript path - es EditScript -} - -// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types -// to the edit-script to connect p.point to dst. -func (p *path) connect(dst point, f EqualFunc) { - if p.dir > 0 { - // Connect in forward direction. 
- for dst.X > p.X && dst.Y > p.Y { - switch r := f(p.X, p.Y); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case dst.X-p.X >= dst.Y-p.Y: - p.append(UniqueX) - default: - p.append(UniqueY) - } - } - for dst.X > p.X { - p.append(UniqueX) - } - for dst.Y > p.Y { - p.append(UniqueY) - } - } else { - // Connect in reverse direction. - for p.X > dst.X && p.Y > dst.Y { - switch r := f(p.X-1, p.Y-1); { - case r.Equal(): - p.append(Identity) - case r.Similar(): - p.append(Modified) - case p.Y-dst.Y >= p.X-dst.X: - p.append(UniqueY) - default: - p.append(UniqueX) - } - } - for p.X > dst.X { - p.append(UniqueX) - } - for p.Y > dst.Y { - p.append(UniqueY) - } - } -} - -func (p *path) append(t EditType) { - p.es = append(p.es, t) - switch t { - case Identity, Modified: - p.add(p.dir, p.dir) - case UniqueX: - p.add(p.dir, 0) - case UniqueY: - p.add(0, p.dir) - } - debug.Update() -} - -type point struct{ X, Y int } - -func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } - -// zigzag maps a consecutive sequence of integers to a zig-zag sequence. -// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] -func zigzag(x int) int { - if x&1 != 0 { - x = ^x - } - return x >> 1 -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go deleted file mode 100644 index d8e459c9b93..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package flags - -// Deterministic controls whether the output of Diff should be deterministic. -// This is only used for testing. -var Deterministic bool diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go deleted file mode 100644 index 82d1d7fbf8a..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = false diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go deleted file mode 100644 index 8646f052934..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.10 - -package flags - -// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. -const AtLeastGo110 = true diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/function/func.go deleted file mode 100644 index d127d436230..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package function provides functionality for identifying function types. -package function - -import ( - "reflect" - "regexp" - "runtime" - "strings" -) - -type funcType int - -const ( - _ funcType = iota - - tbFunc // func(T) bool - ttbFunc // func(T, T) bool - trbFunc // func(T, R) bool - tibFunc // func(T, I) bool - trFunc // func(T) R - - Equal = ttbFunc // func(T, T) bool - EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool - Transformer = trFunc // func(T) R - ValueFilter = ttbFunc // func(T, T) bool - Less = ttbFunc // func(T, T) bool - ValuePredicate = tbFunc // func(T) bool - KeyValuePredicate = trbFunc // func(T, R) bool -) - -var boolType = reflect.TypeOf(true) - -// IsType reports whether the reflect.Type is of the specified function type. -func IsType(t reflect.Type, ft funcType) bool { - if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { - return false - } - ni, no := t.NumIn(), t.NumOut() - switch ft { - case tbFunc: // func(T) bool - if ni == 1 && no == 1 && t.Out(0) == boolType { - return true - } - case ttbFunc: // func(T, T) bool - if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { - return true - } - case trbFunc: // func(T, R) bool - if ni == 2 && no == 1 && t.Out(0) == boolType { - return true - } - case tibFunc: // func(T, I) bool - if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { - return true - } - case trFunc: // func(T) R - if ni == 1 && no == 1 { - return true - } - } - return false -} - -var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) - -// NameOf returns the name of the function value. -func NameOf(v reflect.Value) string { - fnc := runtime.FuncForPC(v.Pointer()) - if fnc == nil { - return "" - } - fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" - - // Method closures have a "-fm" suffix. - fullName = strings.TrimSuffix(fullName, "-fm") - - var name string - for len(fullName) > 0 { - inParen := strings.HasSuffix(fullName, ")") - fullName = strings.TrimSuffix(fullName, ")") - - s := lastIdentRx.FindString(fullName) - if s == "" { - break - } - name = s + "." + name - fullName = strings.TrimSuffix(fullName, s) - - if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { - fullName = fullName[:i] - } - fullName = strings.TrimSuffix(fullName, ".") - } - return strings.TrimSuffix(name, ".") -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/name.go deleted file mode 100644 index b6c12cefb47..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2020, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "reflect" - "strconv" -) - -// TypeString is nearly identical to reflect.Type.String, -// but has an additional option to specify that full type names be used. 
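
A hedged illustration of the qualified flag documented above (the value package is internal, so these are expected outputs rather than an importable example):

	TypeString(reflect.TypeOf(time.Time{}), false) // "time.Time"
	TypeString(reflect.TypeOf(time.Time{}), true)  // `"time".Time` with the full import path quoted
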
-func TypeString(t reflect.Type, qualified bool) string { - return string(appendTypeName(nil, t, qualified, false)) -} - -func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte { - // BUG: Go reflection provides no way to disambiguate two named types - // of the same name and within the same package, - // but declared within the namespace of different functions. - - // Named type. - if t.Name() != "" { - if qualified && t.PkgPath() != "" { - b = append(b, '"') - b = append(b, t.PkgPath()...) - b = append(b, '"') - b = append(b, '.') - b = append(b, t.Name()...) - } else { - b = append(b, t.String()...) - } - return b - } - - // Unnamed type. - switch k := t.Kind(); k { - case reflect.Bool, reflect.String, reflect.UnsafePointer, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - b = append(b, k.String()...) - case reflect.Chan: - if t.ChanDir() == reflect.RecvDir { - b = append(b, "<-"...) - } - b = append(b, "chan"...) - if t.ChanDir() == reflect.SendDir { - b = append(b, "<-"...) - } - b = append(b, ' ') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Func: - if !elideFunc { - b = append(b, "func"...) - } - b = append(b, '(') - for i := 0; i < t.NumIn(); i++ { - if i > 0 { - b = append(b, ", "...) - } - if i == t.NumIn()-1 && t.IsVariadic() { - b = append(b, "..."...) - b = appendTypeName(b, t.In(i).Elem(), qualified, false) - } else { - b = appendTypeName(b, t.In(i), qualified, false) - } - } - b = append(b, ')') - switch t.NumOut() { - case 0: - // Do nothing - case 1: - b = append(b, ' ') - b = appendTypeName(b, t.Out(0), qualified, false) - default: - b = append(b, " ("...) - for i := 0; i < t.NumOut(); i++ { - if i > 0 { - b = append(b, ", "...) - } - b = appendTypeName(b, t.Out(i), qualified, false) - } - b = append(b, ')') - } - case reflect.Struct: - b = append(b, "struct{ "...) - for i := 0; i < t.NumField(); i++ { - if i > 0 { - b = append(b, "; "...) - } - sf := t.Field(i) - if !sf.Anonymous { - if qualified && sf.PkgPath != "" { - b = append(b, '"') - b = append(b, sf.PkgPath...) - b = append(b, '"') - b = append(b, '.') - } - b = append(b, sf.Name...) - b = append(b, ' ') - } - b = appendTypeName(b, sf.Type, qualified, false) - if sf.Tag != "" { - b = append(b, ' ') - b = strconv.AppendQuote(b, string(sf.Tag)) - } - } - if b[len(b)-1] == ' ' { - b = b[:len(b)-1] - } else { - b = append(b, ' ') - } - b = append(b, '}') - case reflect.Slice, reflect.Array: - b = append(b, '[') - if k == reflect.Array { - b = strconv.AppendUint(b, uint64(t.Len()), 10) - } - b = append(b, ']') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Map: - b = append(b, "map["...) - b = appendTypeName(b, t.Key(), qualified, false) - b = append(b, ']') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Ptr: - b = append(b, '*') - b = appendTypeName(b, t.Elem(), qualified, false) - case reflect.Interface: - b = append(b, "interface{ "...) - for i := 0; i < t.NumMethod(); i++ { - if i > 0 { - b = append(b, "; "...) - } - m := t.Method(i) - if qualified && m.PkgPath != "" { - b = append(b, '"') - b = append(b, m.PkgPath...) - b = append(b, '"') - b = append(b, '.') - } - b = append(b, m.Name...) 
- b = appendTypeName(b, m.Type, qualified, true) - } - if b[len(b)-1] == ' ' { - b = b[:len(b)-1] - } else { - b = append(b, ' ') - } - b = append(b, '}') - default: - panic("invalid kind: " + k.String()) - } - return b -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go deleted file mode 100644 index 44f4a5afddc..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build purego - -package value - -import "reflect" - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p uintptr - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // NOTE: Storing a pointer as an uintptr is technically incorrect as it - // assumes that the GC implementation does not use a moving collector. - return Pointer{v.Pointer(), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == 0 -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return p.p -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go deleted file mode 100644 index a605953d466..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !purego - -package value - -import ( - "reflect" - "unsafe" -) - -// Pointer is an opaque typed pointer and is guaranteed to be comparable. -type Pointer struct { - p unsafe.Pointer - t reflect.Type -} - -// PointerOf returns a Pointer from v, which must be a -// reflect.Ptr, reflect.Slice, or reflect.Map. -func PointerOf(v reflect.Value) Pointer { - // The proper representation of a pointer is unsafe.Pointer, - // which is necessary if the GC ever uses a moving collector. - return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} -} - -// IsNil reports whether the pointer is nil. -func (p Pointer) IsNil() bool { - return p.p == nil -} - -// Uintptr returns the pointer as a uintptr. -func (p Pointer) Uintptr() uintptr { - return uintptr(p.p) -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go deleted file mode 100644 index 98533b036cc..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "fmt" - "math" - "reflect" - "sort" -) - -// SortKeys sorts a list of map keys, deduplicating keys if necessary. -// The type of each value must be comparable. -func SortKeys(vs []reflect.Value) []reflect.Value { - if len(vs) == 0 { - return vs - } - - // Sort the map keys. 
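
SortKeys orders keys with isLess (defined just below), under which NaN sorts before every other float, giving map traversal a deterministic order. A brief in-package sketch (illustrative only, since the package is internal; math, reflect, and fmt imports assumed):

	keys := []reflect.Value{
		reflect.ValueOf(2.0), reflect.ValueOf(math.NaN()), reflect.ValueOf(1.0),
	}
	sorted := SortKeys(keys)
	fmt.Println(sorted) // [NaN 1 2]: NaN first, then ascending floats
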
- sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) - - // Deduplicate keys (fails for NaNs). - vs2 := vs[:1] - for _, v := range vs[1:] { - if isLess(vs2[len(vs2)-1], v) { - vs2 = append(vs2, v) - } - } - return vs2 -} - -// isLess is a generic function for sorting arbitrary map keys. -// The inputs must be of the same type and must be comparable. -func isLess(x, y reflect.Value) bool { - switch x.Type().Kind() { - case reflect.Bool: - return !x.Bool() && y.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return x.Int() < y.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return x.Uint() < y.Uint() - case reflect.Float32, reflect.Float64: - // NOTE: This does not sort -0 as less than +0 - // since Go maps treat -0 and +0 as equal keys. - fx, fy := x.Float(), y.Float() - return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) - case reflect.Complex64, reflect.Complex128: - cx, cy := x.Complex(), y.Complex() - rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) - if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { - return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) - } - return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) - case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: - return x.Pointer() < y.Pointer() - case reflect.String: - return x.String() < y.String() - case reflect.Array: - for i := 0; i < x.Len(); i++ { - if isLess(x.Index(i), y.Index(i)) { - return true - } - if isLess(y.Index(i), x.Index(i)) { - return false - } - } - return false - case reflect.Struct: - for i := 0; i < x.NumField(); i++ { - if isLess(x.Field(i), y.Field(i)) { - return true - } - if isLess(y.Field(i), x.Field(i)) { - return false - } - } - return false - case reflect.Interface: - vx, vy := x.Elem(), y.Elem() - if !vx.IsValid() || !vy.IsValid() { - return !vx.IsValid() && vy.IsValid() - } - tx, ty := vx.Type(), vy.Type() - if tx == ty { - return isLess(x.Elem(), y.Elem()) - } - if tx.Kind() != ty.Kind() { - return vx.Kind() < vy.Kind() - } - if tx.String() != ty.String() { - return tx.String() < ty.String() - } - if tx.PkgPath() != ty.PkgPath() { - return tx.PkgPath() < ty.PkgPath() - } - // This can happen in rare situations, so we fallback to just comparing - // the unique pointer for a reflect.Type. This guarantees deterministic - // ordering within a program, but it is obviously not stable. - return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() - default: - // Must be Func, Map, or Slice; which are not comparable. - panic(fmt.Sprintf("%T is not comparable", x.Type())) - } -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go deleted file mode 100644 index 9147a299731..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package value - -import ( - "math" - "reflect" -) - -// IsZero reports whether v is the zero value. -// This does not rely on Interface and so can be used on unexported fields. 
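
Because IsZero never calls Interface, it works even when every field is unexported. A short in-package sketch (illustrative; the pair type is hypothetical):

	type pair struct {
		s []int
		m map[string]bool
	}
	IsZero(reflect.ValueOf(pair{}))           // true: every field is a zero value
	IsZero(reflect.ValueOf(pair{s: []int{}})) // false: a non-nil (but empty) slice
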
-func IsZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return v.Bool() == false - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0 - case reflect.String: - return v.String() == "" - case reflect.UnsafePointer: - return v.Pointer() == 0 - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !IsZero(v.Index(i)) { - return false - } - } - return true - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !IsZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/options.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/options.go deleted file mode 100644 index e57b9eb5392..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/options.go +++ /dev/null @@ -1,552 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/google/go-cmp/cmp/internal/function" -) - -// Option configures for specific behavior of Equal and Diff. In particular, -// the fundamental Option functions (Ignore, Transformer, and Comparer), -// configure how equality is determined. -// -// The fundamental options may be composed with filters (FilterPath and -// FilterValues) to control the scope over which they are applied. -// -// The cmp/cmpopts package provides helper functions for creating options that -// may be used with Equal and Diff. -type Option interface { - // filter applies all filters and returns the option that remains. - // Each option may only read s.curPath and call s.callTTBFunc. - // - // An Options is returned only if multiple comparers or transformers - // can apply simultaneously and will only contain values of those types - // or sub-Options containing values of those types. - filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption -} - -// applicableOption represents the following types: -// Fundamental: ignore | validator | *comparer | *transformer -// Grouping: Options -type applicableOption interface { - Option - - // apply executes the option, which may mutate s or panic. - apply(s *state, vx, vy reflect.Value) -} - -// coreOption represents the following types: -// Fundamental: ignore | validator | *comparer | *transformer -// Filters: *pathFilter | *valuesFilter -type coreOption interface { - Option - isCore() -} - -type core struct{} - -func (core) isCore() {} - -// Options is a list of Option values that also satisfies the Option interface. -// Helper comparison packages may return an Options value when packing multiple -// Option values into a single Option. When this package processes an Options, -// it will be implicitly expanded into a flat list. -// -// Applying a filter on an Options is equivalent to applying that same filter -// on all individual options held within. 
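
Since groups flatten implicitly, nesting Options is purely organizational. A sketch with the public API (the cmpopts helpers are shown for illustration; cmp, cmpopts, and fmt imports assumed):

	x := map[string][]int{"a": nil}
	y := map[string][]int{"a": {}}
	opts := cmp.Options{
		cmpopts.EquateEmpty(),
		cmp.Options{cmpopts.EquateApprox(0, 0.001)}, // nested group, flattened away
	}
	fmt.Println(cmp.Equal(x, y, opts)) // true: nil and empty equate via EquateEmpty
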
-type Options []Option - -func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { - for _, opt := range opts { - switch opt := opt.filter(s, t, vx, vy); opt.(type) { - case ignore: - return ignore{} // Only ignore can short-circuit evaluation - case validator: - out = validator{} // Takes precedence over comparer or transformer - case *comparer, *transformer, Options: - switch out.(type) { - case nil: - out = opt - case validator: - // Keep validator - case *comparer, *transformer, Options: - out = Options{out, opt} // Conflicting comparers or transformers - } - } - } - return out -} - -func (opts Options) apply(s *state, _, _ reflect.Value) { - const warning = "ambiguous set of applicable options" - const help = "consider using filters to ensure at most one Comparer or Transformer may apply" - var ss []string - for _, opt := range flattenOptions(nil, opts) { - ss = append(ss, fmt.Sprint(opt)) - } - set := strings.Join(ss, "\n\t") - panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) -} - -func (opts Options) String() string { - var ss []string - for _, opt := range opts { - ss = append(ss, fmt.Sprint(opt)) - } - return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) -} - -// FilterPath returns a new Option where opt is only evaluated if filter f -// returns true for the current Path in the value tree. -// -// This filter is called even if a slice element or map entry is missing and -// provides an opportunity to ignore such cases. The filter function must be -// symmetric such that the filter result is identical regardless of whether the -// missing value is from x or y. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. -func FilterPath(f func(Path) bool, opt Option) Option { - if f == nil { - panic("invalid path filter function") - } - if opt := normalizeOption(opt); opt != nil { - return &pathFilter{fnc: f, opt: opt} - } - return nil -} - -type pathFilter struct { - core - fnc func(Path) bool - opt Option -} - -func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { - if f.fnc(s.curPath) { - return f.opt.filter(s, t, vx, vy) - } - return nil -} - -func (f pathFilter) String() string { - return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) -} - -// FilterValues returns a new Option where opt is only evaluated if filter f, -// which is a function of the form "func(T, T) bool", returns true for the -// current pair of values being compared. If either value is invalid or -// the type of the values is not assignable to T, then this filter implicitly -// returns false. -// -// The filter function must be -// symmetric (i.e., agnostic to the order of the inputs) and -// deterministic (i.e., produces the same result when given the same inputs). -// If T is an interface, it is possible that f is called with two values with -// different concrete types that both implement T. -// -// The option passed in may be an Ignore, Transformer, Comparer, Options, or -// a previously filtered Option. 
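
A sketch of the filtering described above, scoping an Ignore to a single field by path (the user type and its ID field are illustrative):

	type user struct{ ID, Name string }
	ignoreID := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".ID" // matches the struct field named ID
	}, cmp.Ignore())
	fmt.Println(cmp.Equal(user{"1", "Ada"}, user{"2", "Ada"}, ignoreID)) // true
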
-func FilterValues(f interface{}, opt Option) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { - panic(fmt.Sprintf("invalid values filter function: %T", f)) - } - if opt := normalizeOption(opt); opt != nil { - vf := &valuesFilter{fnc: v, opt: opt} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - vf.typ = ti - } - return vf - } - return nil -} - -type valuesFilter struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool - opt Option -} - -func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { - if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { - return nil - } - if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { - return f.opt.filter(s, t, vx, vy) - } - return nil -} - -func (f valuesFilter) String() string { - return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) -} - -// Ignore is an Option that causes all comparisons to be ignored. -// This value is intended to be combined with FilterPath or FilterValues. -// It is an error to pass an unfiltered Ignore option to Equal. -func Ignore() Option { return ignore{} } - -type ignore struct{ core } - -func (ignore) isFiltered() bool { return false } -func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } -func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } -func (ignore) String() string { return "Ignore()" } - -// validator is a sentinel Option type to indicate that some options could not -// be evaluated due to unexported fields, missing slice elements, or -// missing map entries. Both values are validator only for unexported fields. -type validator struct{ core } - -func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { - if !vx.IsValid() || !vy.IsValid() { - return validator{} - } - if !vx.CanInterface() || !vy.CanInterface() { - return validator{} - } - return nil -} -func (validator) apply(s *state, vx, vy reflect.Value) { - // Implies missing slice element or map entry. - if !vx.IsValid() || !vy.IsValid() { - s.report(vx.IsValid() == vy.IsValid(), 0) - return - } - - // Unable to Interface implies unexported field without visibility access. - if !vx.CanInterface() || !vy.CanInterface() { - help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported" - var name string - if t := s.curPath.Index(-2).Type(); t.Name() != "" { - // Named type with unexported fields. - name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { - help = "consider using cmpopts.EquateErrors to compare error values" - } - } else { - // Unnamed type with unexported fields. Derive PkgPath from field. - var pkgPath string - for i := 0; i < t.NumField() && pkgPath == ""; i++ { - pkgPath = t.Field(i).PkgPath - } - name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int }) - } - panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help)) - } - - panic("not reachable") -} - -// identRx represents a valid identifier according to the Go specification. 
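
One of the remedies suggested by the validator's help text above is cmpopts.EquateErrors; a hedged sketch of why it avoids descending into an error's unexported internals (the result type is illustrative; os, fmt, cmp, and cmpopts imports assumed):

	type result struct{ Err error }

	x := result{Err: os.ErrNotExist}
	y := result{Err: fmt.Errorf("open config: %w", os.ErrNotExist)}
	// Errors compare semantically via errors.Is instead of structurally.
	fmt.Println(cmp.Equal(x, y, cmpopts.EquateErrors())) // true
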
-const identRx = `[_\p{L}][_\p{L}\p{N}]*` - -var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) - -// Transformer returns an Option that applies a transformation function that -// converts values of a certain type into that of another. -// -// The transformer f must be a function "func(T) R" that converts values of -// type T to those of type R and is implicitly filtered to input values -// assignable to T. The transformer must not mutate T in any way. -// -// To help prevent some cases of infinite recursive cycles applying the -// same transform to the output of itself (e.g., in the case where the -// input and output types are the same), an implicit filter is added such that -// a transformer is applicable only if that exact transformer is not already -// in the tail of the Path since the last non-Transform step. -// For situations where the implicit filter is still insufficient, -// consider using cmpopts.AcyclicTransformer, which adds a filter -// to prevent the transformer from being recursively applied upon itself. -// -// The name is a user provided label that is used as the Transform.Name in the -// transformation PathStep (and eventually shown in the Diff output). -// The name must be a valid identifier or qualified identifier in Go syntax. -// If empty, an arbitrary name is used. -func Transformer(name string, f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { - panic(fmt.Sprintf("invalid transformer function: %T", f)) - } - if name == "" { - name = function.NameOf(v) - if !identsRx.MatchString(name) { - name = "λ" // Lambda-symbol as placeholder name - } - } else if !identsRx.MatchString(name) { - panic(fmt.Sprintf("invalid name: %q", name)) - } - tr := &transformer{name: name, fnc: reflect.ValueOf(f)} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - tr.typ = ti - } - return tr -} - -type transformer struct { - core - name string - typ reflect.Type // T - fnc reflect.Value // func(T) R -} - -func (tr *transformer) isFiltered() bool { return tr.typ != nil } - -func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { - for i := len(s.curPath) - 1; i >= 0; i-- { - if t, ok := s.curPath[i].(Transform); !ok { - break // Hit most recent non-Transform step - } else if tr == t.trans { - return nil // Cannot directly use same Transform - } - } - if tr.typ == nil || t.AssignableTo(tr.typ) { - return tr - } - return nil -} - -func (tr *transformer) apply(s *state, vx, vy reflect.Value) { - step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} - vvx := s.callTRFunc(tr.fnc, vx, step) - vvy := s.callTRFunc(tr.fnc, vy, step) - step.vx, step.vy = vvx, vvy - s.compareAny(step) -} - -func (tr transformer) String() string { - return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) -} - -// Comparer returns an Option that determines whether two values are equal -// to each other. -// -// The comparer f must be a function "func(T, T) bool" and is implicitly -// filtered to input values assignable to T. If T is an interface, it is -// possible that f is called with two values of different concrete types that -// both implement T. 
-// -// The equality function must be: -// • Symmetric: equal(x, y) == equal(y, x) -// • Deterministic: equal(x, y) == equal(x, y) -// • Pure: equal(x, y) does not modify x or y -func Comparer(f interface{}) Option { - v := reflect.ValueOf(f) - if !function.IsType(v.Type(), function.Equal) || v.IsNil() { - panic(fmt.Sprintf("invalid comparer function: %T", f)) - } - cm := &comparer{fnc: v} - if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { - cm.typ = ti - } - return cm -} - -type comparer struct { - core - typ reflect.Type // T - fnc reflect.Value // func(T, T) bool -} - -func (cm *comparer) isFiltered() bool { return cm.typ != nil } - -func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { - if cm.typ == nil || t.AssignableTo(cm.typ) { - return cm - } - return nil -} - -func (cm *comparer) apply(s *state, vx, vy reflect.Value) { - eq := s.callTTBFunc(cm.fnc, vx, vy) - s.report(eq, reportByFunc) -} - -func (cm comparer) String() string { - return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) -} - -// Exporter returns an Option that specifies whether Equal is allowed to -// introspect into the unexported fields of certain struct types. -// -// Users of this option must understand that comparing on unexported fields -// from external packages is not safe since changes in the internal -// implementation of some external package may cause the result of Equal -// to unexpectedly change. However, it may be valid to use this option on types -// defined in an internal package where the semantic meaning of an unexported -// field is in the control of the user. -// -// In many cases, a custom Comparer should be used instead that defines -// equality as a function of the public API of a type rather than the underlying -// unexported implementation. -// -// For example, the reflect.Type documentation defines equality to be determined -// by the == operator on the interface (essentially performing a shallow pointer -// comparison) and most attempts to compare *regexp.Regexp types are interested -// in only checking that the regular expression strings are equal. -// Both of these are accomplished using Comparers: -// -// Comparer(func(x, y reflect.Type) bool { return x == y }) -// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) -// -// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore -// all unexported fields on specified struct types. -func Exporter(f func(reflect.Type) bool) Option { - if !supportExporters { - panic("Exporter is not supported on purego builds") - } - return exporter(f) -} - -type exporter func(reflect.Type) bool - -func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") -} - -// AllowUnexported returns an Options that allows Equal to forcibly introspect -// unexported fields of the specified struct types. -// -// See Exporter for the proper use of this option. -func AllowUnexported(types ...interface{}) Option { - m := make(map[reflect.Type]bool) - for _, typ := range types { - t := reflect.TypeOf(typ) - if t.Kind() != reflect.Struct { - panic(fmt.Sprintf("invalid struct type: %T", typ)) - } - m[t] = true - } - return exporter(func(t reflect.Type) bool { return m[t] }) -} - -// Result represents the comparison result for a single node and -// is provided by cmp when calling Result (see Reporter). 
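Transformer and Comparer, whose implementations are deleted above, are the package's two main extension points. A short sketch of each against the public API; the "Sort" name and sample data are illustrative, and the regexp Comparer is lifted from the Exporter doc comment above:

    package main

    import (
        "fmt"
        "regexp"
        "sort"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Named transformer: compare []int irrespective of order.
        // It copies its input, since a transformer must not mutate T.
        sortInts := cmp.Transformer("Sort", func(in []int) []int {
            out := append([]int(nil), in...)
            sort.Ints(out)
            return out
        })
        fmt.Println(cmp.Equal([]int{1, 3, 2}, []int{2, 1, 3}, sortInts)) // true

        // Comparer: equate compiled regexps by their source strings.
        regexpCmp := cmp.Comparer(func(x, y *regexp.Regexp) bool {
            return x.String() == y.String()
        })
        fmt.Println(cmp.Equal(regexp.MustCompile(`a+`), regexp.MustCompile(`a+`), regexpCmp)) // true
    }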
-type Result struct { - _ [0]func() // Make Result incomparable - flags resultFlags -} - -// Equal reports whether the node was determined to be equal or not. -// As a special case, ignored nodes are considered equal. -func (r Result) Equal() bool { - return r.flags&(reportEqual|reportByIgnore) != 0 -} - -// ByIgnore reports whether the node is equal because it was ignored. -// This never reports true if Equal reports false. -func (r Result) ByIgnore() bool { - return r.flags&reportByIgnore != 0 -} - -// ByMethod reports whether the Equal method determined equality. -func (r Result) ByMethod() bool { - return r.flags&reportByMethod != 0 -} - -// ByFunc reports whether a Comparer function determined equality. -func (r Result) ByFunc() bool { - return r.flags&reportByFunc != 0 -} - -// ByCycle reports whether a reference cycle was detected. -func (r Result) ByCycle() bool { - return r.flags&reportByCycle != 0 -} - -type resultFlags uint - -const ( - _ resultFlags = (1 << iota) / 2 - - reportEqual - reportUnequal - reportByIgnore - reportByMethod - reportByFunc - reportByCycle -) - -// Reporter is an Option that can be passed to Equal. When Equal traverses -// the value trees, it calls PushStep as it descends into each node in the -// tree and PopStep as it ascend out of the node. The leaves of the tree are -// either compared (determined to be equal or not equal) or ignored and reported -// as such by calling the Report method. -func Reporter(r interface { - // PushStep is called when a tree-traversal operation is performed. - // The PathStep itself is only valid until the step is popped. - // The PathStep.Values are valid for the duration of the entire traversal - // and must not be mutated. - // - // Equal always calls PushStep at the start to provide an operation-less - // PathStep used to report the root values. - // - // Within a slice, the exact set of inserted, removed, or modified elements - // is unspecified and may change in future implementations. - // The entries of a map are iterated through in an unspecified order. - PushStep(PathStep) - - // Report is called exactly once on leaf nodes to report whether the - // comparison identified the node as equal, unequal, or ignored. - // A leaf node is one that is immediately preceded by and followed by - // a pair of PushStep and PopStep calls. - Report(Result) - - // PopStep ascends back up the value tree. - // There is always a matching pop call for every push call. - PopStep() -}) Option { - return reporter{r} -} - -type reporter struct{ reporterIface } -type reporterIface interface { - PushStep(PathStep) - Report(Result) - PopStep() -} - -func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { - panic("not implemented") -} - -// normalizeOption normalizes the input options such that all Options groups -// are flattened and groups with a single element are reduced to that element. -// Only coreOptions and Options containing coreOptions are allowed. -func normalizeOption(src Option) Option { - switch opts := flattenOptions(nil, Options{src}); len(opts) { - case 0: - return nil - case 1: - return opts[0] - default: - return opts - } -} - -// flattenOptions copies all options in src to dst as a flat list. -// Only coreOptions and Options containing coreOptions are allowed. 
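The Reporter contract described above (paired PushStep/PopStep calls around exactly one Report per leaf) is easiest to see in a minimal implementation. This sketch follows the pattern from the package's own documentation; the diffReporter name and sample inputs are illustrative:

    package main

    import (
        "fmt"
        "strings"

        "github.com/google/go-cmp/cmp"
    )

    // diffReporter records the path and values of every unequal leaf.
    type diffReporter struct {
        path  cmp.Path
        diffs []string
    }

    func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

    func (r *diffReporter) Report(rs cmp.Result) {
        if !rs.Equal() {
            vx, vy := r.path.Last().Values()
            r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v\n", r.path, vx, vy))
        }
    }

    func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

    func main() {
        var r diffReporter
        cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
        fmt.Println(strings.Join(r.diffs, "\n")) // reports the difference at index 1
    }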
-func flattenOptions(dst, src Options) Options { - for _, opt := range src { - switch opt := opt.(type) { - case nil: - continue - case Options: - dst = flattenOptions(dst, opt) - case coreOption: - dst = append(dst, opt) - default: - panic(fmt.Sprintf("invalid option type: %T", opt)) - } - } - return dst -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/path.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/path.go deleted file mode 100644 index f01eff318c5..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/path.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/value" -) - -// Path is a list of PathSteps describing the sequence of operations to get -// from some root type to the current position in the value tree. -// The first Path element is always an operation-less PathStep that exists -// simply to identify the initial type. -// -// When traversing structs with embedded structs, the embedded struct will -// always be accessed as a field before traversing the fields of the -// embedded struct themselves. That is, an exported field from the -// embedded struct will never be accessed directly from the parent struct. -type Path []PathStep - -// PathStep is a union-type for specific operations to traverse -// a value's tree structure. Users of this package never need to implement -// these types as values of this type will be returned by this package. -// -// Implementations of this interface are -// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. -type PathStep interface { - String() string - - // Type is the resulting type after performing the path step. - Type() reflect.Type - - // Values is the resulting values after performing the path step. - // The type of each valid value is guaranteed to be identical to Type. - // - // In some cases, one or both may be invalid or have restrictions: - // • For StructField, both are not interface-able if the current field - // is unexported and the struct type is not explicitly permitted by - // an Exporter to traverse unexported fields. - // • For SliceIndex, one may be invalid if an element is missing from - // either the x or y slice. - // • For MapIndex, one may be invalid if an entry is missing from - // either the x or y map. - // - // The provided values must not be mutated. - Values() (vx, vy reflect.Value) -} - -var ( - _ PathStep = StructField{} - _ PathStep = SliceIndex{} - _ PathStep = MapIndex{} - _ PathStep = Indirect{} - _ PathStep = TypeAssertion{} - _ PathStep = Transform{} -) - -func (pa *Path) push(s PathStep) { - *pa = append(*pa, s) -} - -func (pa *Path) pop() { - *pa = (*pa)[:len(*pa)-1] -} - -// Last returns the last PathStep in the Path. -// If the path is empty, this returns a non-nil PathStep that reports a nil Type. -func (pa Path) Last() PathStep { - return pa.Index(-1) -} - -// Index returns the ith step in the Path and supports negative indexing. -// A negative index starts counting from the tail of the Path such that -1 -// refers to the last step, -2 refers to the second-to-last step, and so on. -// If index is invalid, this returns a non-nil PathStep that reports a nil Type. 
-func (pa Path) Index(i int) PathStep { - if i < 0 { - i = len(pa) + i - } - if i < 0 || i >= len(pa) { - return pathStep{} - } - return pa[i] -} - -// String returns the simplified path to a node. -// The simplified path only contains struct field accesses. -// -// For example: -// MyMap.MySlices.MyField -func (pa Path) String() string { - var ss []string - for _, s := range pa { - if _, ok := s.(StructField); ok { - ss = append(ss, s.String()) - } - } - return strings.TrimPrefix(strings.Join(ss, ""), ".") -} - -// GoString returns the path to a specific node using Go syntax. -// -// For example: -// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField -func (pa Path) GoString() string { - var ssPre, ssPost []string - var numIndirect int - for i, s := range pa { - var nextStep PathStep - if i+1 < len(pa) { - nextStep = pa[i+1] - } - switch s := s.(type) { - case Indirect: - numIndirect++ - pPre, pPost := "(", ")" - switch nextStep.(type) { - case Indirect: - continue // Next step is indirection, so let them batch up - case StructField: - numIndirect-- // Automatic indirection on struct fields - case nil: - pPre, pPost = "", "" // Last step; no need for parenthesis - } - if numIndirect > 0 { - ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) - ssPost = append(ssPost, pPost) - } - numIndirect = 0 - continue - case Transform: - ssPre = append(ssPre, s.trans.name+"(") - ssPost = append(ssPost, ")") - continue - } - ssPost = append(ssPost, s.String()) - } - for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { - ssPre[i], ssPre[j] = ssPre[j], ssPre[i] - } - return strings.Join(ssPre, "") + strings.Join(ssPost, "") -} - -type pathStep struct { - typ reflect.Type - vx, vy reflect.Value -} - -func (ps pathStep) Type() reflect.Type { return ps.typ } -func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } -func (ps pathStep) String() string { - if ps.typ == nil { - return "" - } - s := ps.typ.String() - if s == "" || strings.ContainsAny(s, "{}\n") { - return "root" // Type too simple or complex to print - } - return fmt.Sprintf("{%s}", s) -} - -// StructField represents a struct field access on a field called Name. -type StructField struct{ *structField } -type structField struct { - pathStep - name string - idx int - - // These fields are used for forcibly accessing an unexported field. - // pvx, pvy, and field are only valid if unexported is true. - unexported bool - mayForce bool // Forcibly allow visibility - paddr bool // Was parent addressable? - pvx, pvy reflect.Value // Parent values (always addressible) - field reflect.StructField // Field information -} - -func (sf StructField) Type() reflect.Type { return sf.typ } -func (sf StructField) Values() (vx, vy reflect.Value) { - if !sf.unexported { - return sf.vx, sf.vy // CanInterface reports true - } - - // Forcibly obtain read-write access to an unexported struct field. - if sf.mayForce { - vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr) - vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr) - return vx, vy // CanInterface reports true - } - return sf.vx, sf.vy // CanInterface reports false -} -func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } - -// Name is the field name. -func (sf StructField) Name() string { return sf.name } - -// Index is the index of the field in the parent struct type. -// See reflect.Type.Field. -func (sf StructField) Index() int { return sf.idx } - -// SliceIndex is an index operation on a slice or array at some index Key. 
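PathStep implementations such as StructField are mostly consumed through path filters. A sketch matching a field by name, using the Last and Name accessors above together with FilterPath (defined earlier in options.go, outside the hunks shown); the user type and ID field are illustrative:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type user struct {
        ID   int
        Name string
    }

    func main() {
        // Ignore any struct field named "ID" wherever it appears in the tree.
        ignoreID := cmp.FilterPath(func(p cmp.Path) bool {
            sf, ok := p.Last().(cmp.StructField) // final step is a field access?
            return ok && sf.Name() == "ID"
        }, cmp.Ignore())

        fmt.Println(cmp.Equal(user{1, "a"}, user{2, "a"}, ignoreID)) // true
    }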
-type SliceIndex struct{ *sliceIndex } -type sliceIndex struct { - pathStep - xkey, ykey int - isSlice bool // False for reflect.Array -} - -func (si SliceIndex) Type() reflect.Type { return si.typ } -func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } -func (si SliceIndex) String() string { - switch { - case si.xkey == si.ykey: - return fmt.Sprintf("[%d]", si.xkey) - case si.ykey == -1: - // [5->?] means "I don't know where X[5] went" - return fmt.Sprintf("[%d->?]", si.xkey) - case si.xkey == -1: - // [?->3] means "I don't know where Y[3] came from" - return fmt.Sprintf("[?->%d]", si.ykey) - default: - // [5->3] means "X[5] moved to Y[3]" - return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) - } -} - -// Key is the index key; it may return -1 if in a split state -func (si SliceIndex) Key() int { - if si.xkey != si.ykey { - return -1 - } - return si.xkey -} - -// SplitKeys are the indexes for indexing into slices in the -// x and y values, respectively. These indexes may differ due to the -// insertion or removal of an element in one of the slices, causing -// all of the indexes to be shifted. If an index is -1, then that -// indicates that the element does not exist in the associated slice. -// -// Key is guaranteed to return -1 if and only if the indexes returned -// by SplitKeys are not the same. SplitKeys will never return -1 for -// both indexes. -func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } - -// MapIndex is an index operation on a map at some index Key. -type MapIndex struct{ *mapIndex } -type mapIndex struct { - pathStep - key reflect.Value -} - -func (mi MapIndex) Type() reflect.Type { return mi.typ } -func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } -func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } - -// Key is the value of the map key. -func (mi MapIndex) Key() reflect.Value { return mi.key } - -// Indirect represents pointer indirection on the parent type. -type Indirect struct{ *indirect } -type indirect struct { - pathStep -} - -func (in Indirect) Type() reflect.Type { return in.typ } -func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } -func (in Indirect) String() string { return "*" } - -// TypeAssertion represents a type assertion on an interface. -type TypeAssertion struct{ *typeAssertion } -type typeAssertion struct { - pathStep -} - -func (ta TypeAssertion) Type() reflect.Type { return ta.typ } -func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } -func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } - -// Transform is a transformation from the parent type to the current type. -type Transform struct{ *transform } -type transform struct { - pathStep - trans *transformer -} - -func (tf Transform) Type() reflect.Type { return tf.typ } -func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } -func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } - -// Name is the name of the Transformer. -func (tf Transform) Name() string { return tf.trans.name } - -// Func is the function pointer to the transformer function. -func (tf Transform) Func() reflect.Value { return tf.trans.fnc } - -// Option returns the originally constructed Transformer option. -// The == operator can be used to detect the exact option used. 
-func (tf Transform) Option() Option { return tf.trans } - -// pointerPath represents a dual-stack of pointers encountered when -// recursively traversing the x and y values. This data structure supports -// detection of cycles and determining whether the cycles are equal. -// In Go, cycles can occur via pointers, slices, and maps. -// -// The pointerPath uses a map to represent a stack; where descension into a -// pointer pushes the address onto the stack, and ascension from a pointer -// pops the address from the stack. Thus, when traversing into a pointer from -// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles -// by checking whether the pointer has already been visited. The cycle detection -// uses a separate stack for the x and y values. -// -// If a cycle is detected we need to determine whether the two pointers -// should be considered equal. The definition of equality chosen by Equal -// requires two graphs to have the same structure. To determine this, both the -// x and y values must have a cycle where the previous pointers were also -// encountered together as a pair. -// -// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and -// MapIndex with pointer information for the x and y values. -// Suppose px and py are two pointers to compare, we then search the -// Path for whether px was ever encountered in the Path history of x, and -// similarly so with py. If either side has a cycle, the comparison is only -// equal if both px and py have a cycle resulting from the same PathStep. -// -// Using a map as a stack is more performant as we can perform cycle detection -// in O(1) instead of O(N) where N is len(Path). -type pointerPath struct { - // mx is keyed by x pointers, where the value is the associated y pointer. - mx map[value.Pointer]value.Pointer - // my is keyed by y pointers, where the value is the associated x pointer. - my map[value.Pointer]value.Pointer -} - -func (p *pointerPath) Init() { - p.mx = make(map[value.Pointer]value.Pointer) - p.my = make(map[value.Pointer]value.Pointer) -} - -// Push indicates intent to descend into pointers vx and vy where -// visited reports whether either has been seen before. If visited before, -// equal reports whether both pointers were encountered together. -// Pop must be called if and only if the pointers were never visited. -// -// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map -// and be non-nil. -func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) { - px := value.PointerOf(vx) - py := value.PointerOf(vy) - _, ok1 := p.mx[px] - _, ok2 := p.my[py] - if ok1 || ok2 { - equal = p.mx[px] == py && p.my[py] == px // Pointers paired together - return equal, true - } - p.mx[px] = py - p.my[py] = px - return false, false -} - -// Pop ascends from pointers vx and vy. -func (p pointerPath) Pop(vx, vy reflect.Value) { - delete(p.mx, value.PointerOf(vx)) - delete(p.my, value.PointerOf(vy)) -} - -// isExported reports whether the identifier is exported. -func isExported(id string) bool { - r, _ := utf8.DecodeRuneInString(id) - return unicode.IsUpper(r) -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report.go deleted file mode 100644 index f43cd12eb5f..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2017, The Go Authors. All rights reserved. 
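The pointerPath machinery above is what lets Equal terminate on cyclic values. Its observable behavior, assuming (as the comment describes) that two graphs compare equal when their cycles arise from the same steps; the node type is illustrative:

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    type node struct {
        Value int
        Next  *node
    }

    func main() {
        x := &node{Value: 1}
        x.Next = x // self-referential cycle
        y := &node{Value: 1}
        y.Next = y
        // Both cycles are encountered at the same PathStep,
        // so the graphs have the same structure.
        fmt.Println(cmp.Equal(x, y)) // true
    }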
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -// defaultReporter implements the reporter interface. -// -// As Equal serially calls the PushStep, Report, and PopStep methods, the -// defaultReporter constructs a tree-based representation of the compared value -// and the result of each comparison (see valueNode). -// -// When the String method is called, the FormatDiff method transforms the -// valueNode tree into a textNode tree, which is a tree-based representation -// of the textual output (see textNode). -// -// Lastly, the textNode.String method produces the final report as a string. -type defaultReporter struct { - root *valueNode - curr *valueNode -} - -func (r *defaultReporter) PushStep(ps PathStep) { - r.curr = r.curr.PushStep(ps) - if r.root == nil { - r.root = r.curr - } -} -func (r *defaultReporter) Report(rs Result) { - r.curr.Report(rs) -} -func (r *defaultReporter) PopStep() { - r.curr = r.curr.PopStep() -} - -// String provides a full report of the differences detected as a structured -// literal in pseudo-Go syntax. String may only be called after the entire tree -// has been traversed. -func (r *defaultReporter) String() string { - assert(r.root != nil && r.curr == nil) - if r.root.NumDiff == 0 { - return "" - } - ptrs := new(pointerReferences) - text := formatOptions{}.FormatDiff(r.root, ptrs) - resolveReferences(text) - return text.String() -} - -func assert(ok bool) { - if !ok { - panic("assertion failure") - } -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_compare.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_compare.go deleted file mode 100644 index 104bb30538b..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - - "github.com/google/go-cmp/cmp/internal/value" -) - -// numContextRecords is the number of surrounding equal records to print. -const numContextRecords = 2 - -type diffMode byte - -const ( - diffUnknown diffMode = 0 - diffIdentical diffMode = ' ' - diffRemoved diffMode = '-' - diffInserted diffMode = '+' -) - -type typeMode int - -const ( - // emitType always prints the type. - emitType typeMode = iota - // elideType never prints the type. - elideType - // autoType prints the type only for composite kinds - // (i.e., structs, slices, arrays, and maps). - autoType -) - -type formatOptions struct { - // DiffMode controls the output mode of FormatDiff. - // - // If diffUnknown, then produce a diff of the x and y values. - // If diffIdentical, then emit values as if they were equal. - // If diffRemoved, then only emit x values (ignoring y values). - // If diffInserted, then only emit y values (ignoring x values). - DiffMode diffMode - - // TypeMode controls whether to print the type for the current node. - // - // As a general rule of thumb, we always print the type of the next node - // after an interface, and always elide the type of the next node after - // a slice or map node. - TypeMode typeMode - - // formatValueOptions are options specific to printing reflect.Values. 
- formatValueOptions -} - -func (opts formatOptions) WithDiffMode(d diffMode) formatOptions { - opts.DiffMode = d - return opts -} -func (opts formatOptions) WithTypeMode(t typeMode) formatOptions { - opts.TypeMode = t - return opts -} -func (opts formatOptions) WithVerbosity(level int) formatOptions { - opts.VerbosityLevel = level - opts.LimitVerbosity = true - return opts -} -func (opts formatOptions) verbosity() uint { - switch { - case opts.VerbosityLevel < 0: - return 0 - case opts.VerbosityLevel > 16: - return 16 // some reasonable maximum to avoid shift overflow - default: - return uint(opts.VerbosityLevel) - } -} - -const maxVerbosityPreset = 6 - -// verbosityPreset modifies the verbosity settings given an index -// between 0 and maxVerbosityPreset, inclusive. -func verbosityPreset(opts formatOptions, i int) formatOptions { - opts.VerbosityLevel = int(opts.verbosity()) + 2*i - if i > 0 { - opts.AvoidStringer = true - } - if i >= maxVerbosityPreset { - opts.PrintAddresses = true - opts.QualifiedNames = true - } - return opts -} - -// FormatDiff converts a valueNode tree into a textNode tree, where the later -// is a textual representation of the differences detected in the former. -func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) { - if opts.DiffMode == diffIdentical { - opts = opts.WithVerbosity(1) - } else if opts.verbosity() < 3 { - opts = opts.WithVerbosity(3) - } - - // Check whether we have specialized formatting for this node. - // This is not necessary, but helpful for producing more readable outputs. - if opts.CanFormatDiffSlice(v) { - return opts.FormatDiffSlice(v) - } - - var parentKind reflect.Kind - if v.parent != nil && v.parent.TransformerName == "" { - parentKind = v.parent.Type.Kind() - } - - // For leaf nodes, format the value based on the reflect.Values alone. - if v.MaxDepth == 0 { - switch opts.DiffMode { - case diffUnknown, diffIdentical: - // Format Equal. - if v.NumDiff == 0 { - outx := opts.FormatValue(v.ValueX, parentKind, ptrs) - outy := opts.FormatValue(v.ValueY, parentKind, ptrs) - if v.NumIgnored > 0 && v.NumSame == 0 { - return textEllipsis - } else if outx.Len() < outy.Len() { - return outx - } else { - return outy - } - } - - // Format unequal. - assert(opts.DiffMode == diffUnknown) - var list textList - outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs) - outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs) - for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { - opts2 := verbosityPreset(opts, i).WithTypeMode(elideType) - outx = opts2.FormatValue(v.ValueX, parentKind, ptrs) - outy = opts2.FormatValue(v.ValueY, parentKind, ptrs) - } - if outx != nil { - list = append(list, textRecord{Diff: '-', Value: outx}) - } - if outy != nil { - list = append(list, textRecord{Diff: '+', Value: outy}) - } - return opts.WithTypeMode(emitType).FormatType(v.Type, list) - case diffRemoved: - return opts.FormatValue(v.ValueX, parentKind, ptrs) - case diffInserted: - return opts.FormatValue(v.ValueY, parentKind, ptrs) - default: - panic("invalid diff mode") - } - } - - // Register slice element to support cycle detection. - if parentKind == reflect.Slice { - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true) - defer ptrs.Pop() - defer func() { out = wrapTrunkReferences(ptrRefs, out) }() - } - - // Descend into the child value node. 
- if v.TransformerName != "" { - out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) - out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"} - return opts.FormatType(v.Type, out) - } else { - switch k := v.Type.Kind(); k { - case reflect.Struct, reflect.Array, reflect.Slice: - out = opts.formatDiffList(v.Records, k, ptrs) - out = opts.FormatType(v.Type, out) - case reflect.Map: - // Register map to support cycle detection. - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) - defer ptrs.Pop() - - out = opts.formatDiffList(v.Records, k, ptrs) - out = wrapTrunkReferences(ptrRefs, out) - out = opts.FormatType(v.Type, out) - case reflect.Ptr: - // Register pointer to support cycle detection. - ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false) - defer ptrs.Pop() - - out = opts.FormatDiff(v.Value, ptrs) - out = wrapTrunkReferences(ptrRefs, out) - out = &textWrap{Prefix: "&", Value: out} - case reflect.Interface: - out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs) - default: - panic(fmt.Sprintf("%v cannot have children", k)) - } - return out - } -} - -func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode { - // Derive record name based on the data structure kind. - var name string - var formatKey func(reflect.Value) string - switch k { - case reflect.Struct: - name = "field" - opts = opts.WithTypeMode(autoType) - formatKey = func(v reflect.Value) string { return v.String() } - case reflect.Slice, reflect.Array: - name = "element" - opts = opts.WithTypeMode(elideType) - formatKey = func(reflect.Value) string { return "" } - case reflect.Map: - name = "entry" - opts = opts.WithTypeMode(elideType) - formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) } - } - - maxLen := -1 - if opts.LimitVerbosity { - if opts.DiffMode == diffIdentical { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - } else { - maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc... - } - opts.VerbosityLevel-- - } - - // Handle unification. - switch opts.DiffMode { - case diffIdentical, diffRemoved, diffInserted: - var list textList - var deferredEllipsis bool // Add final "..." to indicate records were dropped - for _, r := range recs { - if len(list) == maxLen { - deferredEllipsis = true - break - } - - // Elide struct fields that are zero value. - if k == reflect.Struct { - var isZero bool - switch opts.DiffMode { - case diffIdentical: - isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY) - case diffRemoved: - isZero = value.IsZero(r.Value.ValueX) - case diffInserted: - isZero = value.IsZero(r.Value.ValueY) - } - if isZero { - continue - } - } - // Elide ignored nodes. - if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 { - deferredEllipsis = !(k == reflect.Slice || k == reflect.Array) - if !deferredEllipsis { - list.AppendEllipsis(diffStats{}) - } - continue - } - if out := opts.FormatDiff(r.Value, ptrs); out != nil { - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - } - } - if deferredEllipsis { - list.AppendEllipsis(diffStats{}) - } - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} - case diffUnknown: - default: - panic("invalid diff mode") - } - - // Handle differencing. 
- var numDiffs int - var list textList - var keys []reflect.Value // invariant: len(list) == len(keys) - groups := coalesceAdjacentRecords(name, recs) - maxGroup := diffStats{Name: name} - for i, ds := range groups { - if maxLen >= 0 && numDiffs >= maxLen { - maxGroup = maxGroup.Append(ds) - continue - } - - // Handle equal records. - if ds.NumDiff() == 0 { - // Compute the number of leading and trailing records to print. - var numLo, numHi int - numEqual := ds.NumIgnored + ds.NumIdentical - for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 { - if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { - break - } - numLo++ - } - for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { - if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 { - break - } - numHi++ - } - if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 { - numHi++ // Avoid pointless coalescing of a single equal record - } - - // Format the equal values. - for _, r := range recs[:numLo] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - if numEqual > numLo+numHi { - ds.NumIdentical -= numLo + numHi - list.AppendEllipsis(ds) - for len(keys) < len(list) { - keys = append(keys, reflect.Value{}) - } - } - for _, r := range recs[numEqual-numHi : numEqual] { - out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - recs = recs[numEqual:] - continue - } - - // Handle unequal records. - for _, r := range recs[:ds.NumDiff()] { - switch { - case opts.CanFormatDiffSlice(r.Value): - out := opts.FormatDiffSlice(r.Value) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - case r.Value.NumChildren == r.Value.MaxDepth: - outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) - outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) - for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ { - opts2 := verbosityPreset(opts, i) - outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs) - outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs) - } - if outx != nil { - list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx}) - keys = append(keys, r.Key) - } - if outy != nil { - list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy}) - keys = append(keys, r.Key) - } - default: - out := opts.FormatDiff(r.Value, ptrs) - list = append(list, textRecord{Key: formatKey(r.Key), Value: out}) - keys = append(keys, r.Key) - } - } - recs = recs[ds.NumDiff():] - numDiffs += ds.NumDiff() - } - if maxGroup.IsZero() { - assert(len(recs) == 0) - } else { - list.AppendEllipsis(maxGroup) - for len(keys) < len(list) { - keys = append(keys, reflect.Value{}) - } - } - assert(len(list) == len(keys)) - - // For maps, the default formatting logic uses fmt.Stringer which may - // produce ambiguous output. Avoid calling String to disambiguate. 
- if k == reflect.Map { - var ambiguous bool - seenKeys := map[string]reflect.Value{} - for i, currKey := range keys { - if currKey.IsValid() { - strKey := list[i].Key - prevKey, seen := seenKeys[strKey] - if seen && prevKey.CanInterface() && currKey.CanInterface() { - ambiguous = prevKey.Interface() != currKey.Interface() - if ambiguous { - break - } - } - seenKeys[strKey] = currKey - } - } - if ambiguous { - for i, k := range keys { - if k.IsValid() { - list[i].Key = formatMapKey(k, true, ptrs) - } - } - } - } - - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} -} - -// coalesceAdjacentRecords coalesces the list of records into groups of -// adjacent equal, or unequal counts. -func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) { - var prevCase int // Arbitrary index into which case last occurred - lastStats := func(i int) *diffStats { - if prevCase != i { - groups = append(groups, diffStats{Name: name}) - prevCase = i - } - return &groups[len(groups)-1] - } - for _, r := range recs { - switch rv := r.Value; { - case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0: - lastStats(1).NumIgnored++ - case rv.NumDiff == 0: - lastStats(1).NumIdentical++ - case rv.NumDiff > 0 && !rv.ValueY.IsValid(): - lastStats(2).NumRemoved++ - case rv.NumDiff > 0 && !rv.ValueX.IsValid(): - lastStats(2).NumInserted++ - default: - lastStats(2).NumModified++ - } - } - return groups -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_references.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_references.go deleted file mode 100644 index be31b33a9e1..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_references.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2020, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "fmt" - "reflect" - "strings" - - "github.com/google/go-cmp/cmp/internal/flags" - "github.com/google/go-cmp/cmp/internal/value" -) - -const ( - pointerDelimPrefix = "⟪" - pointerDelimSuffix = "⟫" -) - -// formatPointer prints the address of the pointer. -func formatPointer(p value.Pointer, withDelims bool) string { - v := p.Uintptr() - if flags.Deterministic { - v = 0xdeadf00f // Only used for stable testing purposes - } - if withDelims { - return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix - } - return formatHex(uint64(v)) -} - -// pointerReferences is a stack of pointers visited so far. 
-type pointerReferences [][2]value.Pointer - -func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) { - if deref && vx.IsValid() { - vx = vx.Addr() - } - if deref && vy.IsValid() { - vy = vy.Addr() - } - switch d { - case diffUnknown, diffIdentical: - pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)} - case diffRemoved: - pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}} - case diffInserted: - pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)} - } - *ps = append(*ps, pp) - return pp -} - -func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) { - p = value.PointerOf(v) - for _, pp := range *ps { - if p == pp[0] || p == pp[1] { - return p, true - } - } - *ps = append(*ps, [2]value.Pointer{p, p}) - return p, false -} - -func (ps *pointerReferences) Pop() { - *ps = (*ps)[:len(*ps)-1] -} - -// trunkReferences is metadata for a textNode indicating that the sub-tree -// represents the value for either pointer in a pair of references. -type trunkReferences struct{ pp [2]value.Pointer } - -// trunkReference is metadata for a textNode indicating that the sub-tree -// represents the value for the given pointer reference. -type trunkReference struct{ p value.Pointer } - -// leafReference is metadata for a textNode indicating that the value is -// truncated as it refers to another part of the tree (i.e., a trunk). -type leafReference struct{ p value.Pointer } - -func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode { - switch { - case pp[0].IsNil(): - return &textWrap{Value: s, Metadata: trunkReference{pp[1]}} - case pp[1].IsNil(): - return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} - case pp[0] == pp[1]: - return &textWrap{Value: s, Metadata: trunkReference{pp[0]}} - default: - return &textWrap{Value: s, Metadata: trunkReferences{pp}} - } -} -func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode { - var prefix string - if printAddress { - prefix = formatPointer(p, true) - } - return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}} -} -func makeLeafReference(p value.Pointer, printAddress bool) textNode { - out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"} - var prefix string - if printAddress { - prefix = formatPointer(p, true) - } - return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}} -} - -// resolveReferences walks the textNode tree searching for any leaf reference -// metadata and resolves each against the corresponding trunk references. -// Since pointer addresses in memory are not particularly readable to the user, -// it replaces each pointer value with an arbitrary and unique reference ID. -func resolveReferences(s textNode) { - var walkNodes func(textNode, func(textNode)) - walkNodes = func(s textNode, f func(textNode)) { - f(s) - switch s := s.(type) { - case *textWrap: - walkNodes(s.Value, f) - case textList: - for _, r := range s { - walkNodes(r.Value, f) - } - } - } - - // Collect all trunks and leaves with reference metadata. - var trunks, leaves []*textWrap - walkNodes(s, func(s textNode) { - if s, ok := s.(*textWrap); ok { - switch s.Metadata.(type) { - case leafReference: - leaves = append(leaves, s) - case trunkReference, trunkReferences: - trunks = append(trunks, s) - } - } - }) - - // No leaf references to resolve. - if len(leaves) == 0 { - return - } - - // Collect the set of all leaf references to resolve. 
- leafPtrs := make(map[value.Pointer]bool) - for _, leaf := range leaves { - leafPtrs[leaf.Metadata.(leafReference).p] = true - } - - // Collect the set of trunk pointers that are always paired together. - // This allows us to assign a single ID to both pointers for brevity. - // If a pointer in a pair ever occurs by itself or as a different pair, - // then the pair is broken. - pairedTrunkPtrs := make(map[value.Pointer]value.Pointer) - unpair := func(p value.Pointer) { - if !pairedTrunkPtrs[p].IsNil() { - pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half - } - pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half - } - for _, trunk := range trunks { - switch p := trunk.Metadata.(type) { - case trunkReference: - unpair(p.p) // standalone pointer cannot be part of a pair - case trunkReferences: - p0, ok0 := pairedTrunkPtrs[p.pp[0]] - p1, ok1 := pairedTrunkPtrs[p.pp[1]] - switch { - case !ok0 && !ok1: - // Register the newly seen pair. - pairedTrunkPtrs[p.pp[0]] = p.pp[1] - pairedTrunkPtrs[p.pp[1]] = p.pp[0] - case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]: - // Exact pair already seen; do nothing. - default: - // Pair conflicts with some other pair; break all pairs. - unpair(p.pp[0]) - unpair(p.pp[1]) - } - } - } - - // Correlate each pointer referenced by leaves to a unique identifier, - // and print the IDs for each trunk that matches those pointers. - var nextID uint - ptrIDs := make(map[value.Pointer]uint) - newID := func() uint { - id := nextID - nextID++ - return id - } - for _, trunk := range trunks { - switch p := trunk.Metadata.(type) { - case trunkReference: - if print := leafPtrs[p.p]; print { - id, ok := ptrIDs[p.p] - if !ok { - id = newID() - ptrIDs[p.p] = id - } - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) - } - case trunkReferences: - print0 := leafPtrs[p.pp[0]] - print1 := leafPtrs[p.pp[1]] - if print0 || print1 { - id0, ok0 := ptrIDs[p.pp[0]] - id1, ok1 := ptrIDs[p.pp[1]] - isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0] - if isPair { - var id uint - assert(ok0 == ok1) // must be seen together or not at all - if ok0 { - assert(id0 == id1) // must have the same ID - id = id0 - } else { - id = newID() - ptrIDs[p.pp[0]] = id - ptrIDs[p.pp[1]] = id - } - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id)) - } else { - if print0 && !ok0 { - id0 = newID() - ptrIDs[p.pp[0]] = id0 - } - if print1 && !ok1 { - id1 = newID() - ptrIDs[p.pp[1]] = id1 - } - switch { - case print0 && print1: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1)) - case print0: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)) - case print1: - trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1)) - } - } - } - } - } - - // Update all leaf references with the unique identifier. 
- for _, leaf := range leaves { - if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok { - leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id)) - } - } -} - -func formatReference(id uint) string { - return fmt.Sprintf("ref#%d", id) -} - -func updateReferencePrefix(prefix, ref string) string { - if prefix == "" { - return pointerDelimPrefix + ref + pointerDelimSuffix - } - suffix := strings.TrimPrefix(prefix, pointerDelimPrefix) - return pointerDelimPrefix + ref + ": " + suffix -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_reflect.go deleted file mode 100644 index 33f03577f98..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ /dev/null @@ -1,402 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/value" -) - -type formatValueOptions struct { - // AvoidStringer controls whether to avoid calling custom stringer - // methods like error.Error or fmt.Stringer.String. - AvoidStringer bool - - // PrintAddresses controls whether to print the address of all pointers, - // slice elements, and maps. - PrintAddresses bool - - // QualifiedNames controls whether FormatType uses the fully qualified name - // (including the full package path as opposed to just the package name). - QualifiedNames bool - - // VerbosityLevel controls the amount of output to produce. - // A higher value produces more output. A value of zero or lower produces - // no output (represented using an ellipsis). - // If LimitVerbosity is false, then the level is treated as infinite. - VerbosityLevel int - - // LimitVerbosity specifies that formatting should respect VerbosityLevel. - LimitVerbosity bool -} - -// FormatType prints the type as if it were wrapping s. -// This may return s as-is depending on the current type and TypeMode mode. -func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode { - // Check whether to emit the type or not. - switch opts.TypeMode { - case autoType: - switch t.Kind() { - case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map: - if s.Equal(textNil) { - return s - } - default: - return s - } - if opts.DiffMode == diffIdentical { - return s // elide type for identical nodes - } - case elideType: - return s - } - - // Determine the type label, applying special handling for unnamed types. - typeName := value.TypeString(t, opts.QualifiedNames) - if t.Name() == "" { - // According to Go grammar, certain type literals contain symbols that - // do not strongly bind to the next lexicographical token (e.g., *T). - switch t.Kind() { - case reflect.Chan, reflect.Func, reflect.Ptr: - typeName = "(" + typeName + ")" - } - } - return &textWrap{Prefix: typeName, Value: wrapParens(s)} -} - -// wrapParens wraps s with a set of parenthesis, but avoids it if the -// wrapped node itself is already surrounded by a pair of parenthesis or braces. -// It handles unwrapping one level of pointer-reference nodes. -func wrapParens(s textNode) textNode { - var refNode *textWrap - if s2, ok := s.(*textWrap); ok { - // Unwrap a single pointer reference node. 
- switch s2.Metadata.(type) { - case leafReference, trunkReference, trunkReferences: - refNode = s2 - if s3, ok := refNode.Value.(*textWrap); ok { - s2 = s3 - } - } - - // Already has delimiters that make parenthesis unnecessary. - hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")") - hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}") - if hasParens || hasBraces { - return s - } - } - if refNode != nil { - refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"} - return s - } - return &textWrap{Prefix: "(", Value: s, Suffix: ")"} -} - -// FormatValue prints the reflect.Value, taking extra care to avoid descending -// into pointers already in ptrs. As pointers are visited, ptrs is also updated. -func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) { - if !v.IsValid() { - return nil - } - t := v.Type() - - // Check slice element for cycles. - if parentKind == reflect.Slice { - ptrRef, visited := ptrs.Push(v.Addr()) - if visited { - return makeLeafReference(ptrRef, false) - } - defer ptrs.Pop() - defer func() { out = wrapTrunkReference(ptrRef, false, out) }() - } - - // Check whether there is an Error or String method to call. - if !opts.AvoidStringer && v.CanInterface() { - // Avoid calling Error or String methods on nil receivers since many - // implementations crash when doing so. - if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { - var prefix, strVal string - func() { - // Swallow and ignore any panics from String or Error. - defer func() { recover() }() - switch v := v.Interface().(type) { - case error: - strVal = v.Error() - prefix = "e" - case fmt.Stringer: - strVal = v.String() - prefix = "s" - } - }() - if prefix != "" { - return opts.formatString(prefix, strVal) - } - } - } - - // Check whether to explicitly wrap the result with the type. - var skipType bool - defer func() { - if !skipType { - out = opts.FormatType(t, out) - } - }() - - switch t.Kind() { - case reflect.Bool: - return textLine(fmt.Sprint(v.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return textLine(fmt.Sprint(v.Int())) - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return textLine(fmt.Sprint(v.Uint())) - case reflect.Uint8: - if parentKind == reflect.Slice || parentKind == reflect.Array { - return textLine(formatHex(v.Uint())) - } - return textLine(fmt.Sprint(v.Uint())) - case reflect.Uintptr: - return textLine(formatHex(v.Uint())) - case reflect.Float32, reflect.Float64: - return textLine(fmt.Sprint(v.Float())) - case reflect.Complex64, reflect.Complex128: - return textLine(fmt.Sprint(v.Complex())) - case reflect.String: - return opts.formatString("", v.String()) - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - return textLine(formatPointer(value.PointerOf(v), true)) - case reflect.Struct: - var list textList - v := makeAddressable(v) // needed for retrieveUnexportedField - maxLen := v.NumField() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... 
- opts.VerbosityLevel-- - } - for i := 0; i < v.NumField(); i++ { - vv := v.Field(i) - if value.IsZero(vv) { - continue // Elide fields with zero values - } - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - sf := t.Field(i) - if supportExporters && !isExported(sf.Name) { - vv = retrieveUnexportedField(v, sf, true) - } - s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs) - list = append(list, textRecord{Key: sf.Name, Value: s}) - } - return &textWrap{Prefix: "{", Value: list, Suffix: "}"} - case reflect.Slice: - if v.IsNil() { - return textNil - } - - // Check whether this is a []byte of text data. - if t.Elem() == reflect.TypeOf(byte(0)) { - b := v.Bytes() - isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) && unicode.IsSpace(r) } - if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 { - out = opts.formatString("", string(b)) - return opts.WithTypeMode(emitType).FormatType(t, out) - } - } - - fallthrough - case reflect.Array: - maxLen := v.Len() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - opts.VerbosityLevel-- - } - var list textList - for i := 0; i < v.Len(); i++ { - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs) - list = append(list, textRecord{Value: s}) - } - - out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if t.Kind() == reflect.Slice && opts.PrintAddresses { - header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap()) - out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out} - } - return out - case reflect.Map: - if v.IsNil() { - return textNil - } - - // Check pointer for cycles. - ptrRef, visited := ptrs.Push(v) - if visited { - return makeLeafReference(ptrRef, opts.PrintAddresses) - } - defer ptrs.Pop() - - maxLen := v.Len() - if opts.LimitVerbosity { - maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc... - opts.VerbosityLevel-- - } - var list textList - for _, k := range value.SortKeys(v.MapKeys()) { - if len(list) == maxLen { - list.AppendEllipsis(diffStats{}) - break - } - sk := formatMapKey(k, false, ptrs) - sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs) - list = append(list, textRecord{Key: sk, Value: sv}) - } - - out = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) - return out - case reflect.Ptr: - if v.IsNil() { - return textNil - } - - // Check pointer for cycles. - ptrRef, visited := ptrs.Push(v) - if visited { - out = makeLeafReference(ptrRef, opts.PrintAddresses) - return &textWrap{Prefix: "&", Value: out} - } - defer ptrs.Pop() - - skipType = true // Let the underlying value print the type instead - out = opts.FormatValue(v.Elem(), t.Kind(), ptrs) - out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out) - out = &textWrap{Prefix: "&", Value: out} - return out - case reflect.Interface: - if v.IsNil() { - return textNil - } - // Interfaces accept different concrete types, - // so configure the underlying value to explicitly print the type. 
- skipType = true // Print the concrete type instead - return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs) - default: - panic(fmt.Sprintf("%v kind not handled", v.Kind())) - } -} - -func (opts formatOptions) formatString(prefix, s string) textNode { - maxLen := len(s) - maxLines := strings.Count(s, "\n") + 1 - if opts.LimitVerbosity { - maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc... - maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... - } - - // For multiline strings, use the triple-quote syntax, - // but only use it when printing removed or inserted nodes since - // we only want the extra verbosity for those cases. - lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n") - isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+') - for i := 0; i < len(lines) && isTripleQuoted; i++ { - lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support - isPrintable := func(r rune) bool { - return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable - } - line := lines[i] - isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen - } - if isTripleQuoted { - var list textList - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) - for i, line := range lines { - if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 { - comment := commentString(fmt.Sprintf("%d elided lines", numElided)) - list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment}) - break - } - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true}) - } - list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true}) - return &textWrap{Prefix: "(", Value: list, Suffix: ")"} - } - - // Format the string as a single-line quoted string. - if len(s) > maxLen+len(textEllipsis) { - return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis)) - } - return textLine(prefix + formatString(s)) -} - -// formatMapKey formats v as if it were a map key. -// The result is guaranteed to be a single line. -func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string { - var opts formatOptions - opts.DiffMode = diffIdentical - opts.TypeMode = elideType - opts.PrintAddresses = disambiguate - opts.AvoidStringer = disambiguate - opts.QualifiedNames = disambiguate - opts.VerbosityLevel = maxVerbosityPreset - opts.LimitVerbosity = true - s := opts.FormatValue(v, reflect.Map, ptrs).String() - return strings.TrimSpace(s) -} - -// formatString prints s as a double-quoted or backtick-quoted string. -func formatString(s string) string { - // Use quoted string if it the same length as a raw string literal. - // Otherwise, attempt to use the raw string form. - qs := strconv.Quote(s) - if len(qs) == 1+len(s)+1 { - return qs - } - - // Disallow newlines to ensure output is a single line. - // Only allow printable runes for readability purposes. - rawInvalid := func(r rune) bool { - return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t') - } - if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 { - return "`" + s + "`" - } - return qs -} - -// formatHex prints u as a hexadecimal integer in Go notation. 
-func formatHex(u uint64) string { - var f string - switch { - case u <= 0xff: - f = "0x%02x" - case u <= 0xffff: - f = "0x%04x" - case u <= 0xffffff: - f = "0x%06x" - case u <= 0xffffffff: - f = "0x%08x" - case u <= 0xffffffffff: - f = "0x%010x" - case u <= 0xffffffffffff: - f = "0x%012x" - case u <= 0xffffffffffffff: - f = "0x%014x" - case u <= 0xffffffffffffffff: - f = "0x%016x" - } - return fmt.Sprintf(f, u) -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_slices.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_slices.go deleted file mode 100644 index 2ad3bc85ba8..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ /dev/null @@ -1,613 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/diff" -) - -// CanFormatDiffSlice reports whether we support custom formatting for nodes -// that are slices of primitive kinds or strings. -func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool { - switch { - case opts.DiffMode != diffUnknown: - return false // Must be formatting in diff mode - case v.NumDiff == 0: - return false // No differences detected - case !v.ValueX.IsValid() || !v.ValueY.IsValid(): - return false // Both values must be valid - case v.NumIgnored > 0: - return false // Some ignore option was used - case v.NumTransformed > 0: - return false // Some transform option was used - case v.NumCompared > 1: - return false // More than one comparison was used - case v.NumCompared == 1 && v.Type.Name() != "": - // The need for cmp to check applicability of options on every element - // in a slice is a significant performance detriment for large []byte. - // The workaround is to specify Comparer(bytes.Equal), - // which enables cmp to compare []byte more efficiently. - // If they differ, we still want to provide batched diffing. - // The logic disallows named types since they tend to have their own - // String method, with nicer formatting than what this provides. - return false - } - - // Check whether this is an interface with the same concrete types. - t := v.Type - vx, vy := v.ValueX, v.ValueY - if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() { - vx, vy = vx.Elem(), vy.Elem() - t = vx.Type() - } - - // Check whether we provide specialized diffing for this type. - switch t.Kind() { - case reflect.String: - case reflect.Array, reflect.Slice: - // Only slices of primitive types have specialized handling. - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - default: - return false - } - - // Both slice values have to be non-empty. - if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) { - return false - } - - // If a sufficient number of elements already differ, - // use specialized formatting even if length requirement is not met. - if v.NumDiff > v.NumSame { - return true - } - default: - return false - } - - // Use specialized string diffing for longer slices or strings. 
- const minLength = 64 - return vx.Len() >= minLength && vy.Len() >= minLength -} - -// FormatDiffSlice prints a diff for the slices (or strings) represented by v. -// This provides custom-tailored logic to make printing of differences in -// textual strings and slices of primitive kinds more readable. -func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode { - assert(opts.DiffMode == diffUnknown) - t, vx, vy := v.Type, v.ValueX, v.ValueY - if t.Kind() == reflect.Interface { - vx, vy = vx.Elem(), vy.Elem() - t = vx.Type() - opts = opts.WithTypeMode(emitType) - } - - // Auto-detect the type of the data. - var sx, sy string - var ssx, ssy []string - var isString, isMostlyText, isPureLinedText, isBinary bool - switch { - case t.Kind() == reflect.String: - sx, sy = vx.String(), vy.String() - isString = true - case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)): - sx, sy = string(vx.Bytes()), string(vy.Bytes()) - isString = true - case t.Kind() == reflect.Array: - // Arrays need to be addressable for slice operations to work. - vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem() - vx2.Set(vx) - vy2.Set(vy) - vx, vy = vx2, vy2 - } - if isString { - var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int - for i, r := range sx + sy { - numTotalRunes++ - if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError { - numValidRunes++ - } - if r == '\n' { - if maxLineLen < i-lastLineIdx { - maxLineLen = i - lastLineIdx - } - lastLineIdx = i + 1 - numLines++ - } - } - isPureText := numValidRunes == numTotalRunes - isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes)) - isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024 - isBinary = !isMostlyText - - // Avoid diffing by lines if it produces a significantly more complex - // edit script than diffing by bytes. - if isPureLinedText { - ssx = strings.Split(sx, "\n") - ssy = strings.Split(sy, "\n") - esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result { - return diff.BoolResult(ssx[ix] == ssy[iy]) - }) - esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result { - return diff.BoolResult(sx[ix] == sy[iy]) - }) - efficiencyLines := float64(esLines.Dist()) / float64(len(esLines)) - efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes)) - isPureLinedText = efficiencyLines < 4*efficiencyBytes - } - } - - // Format the string into printable records. - var list textList - var delim string - switch { - // If the text appears to be multi-lined text, - // then perform differencing across individual lines. - case isPureLinedText: - list = opts.formatDiffSlice( - reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line", - func(v reflect.Value, d diffMode) textRecord { - s := formatString(v.Index(0).String()) - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - delim = "\n" - - // If possible, use a custom triple-quote (""") syntax for printing - // differences in a string literal. This format is more readable, - // but has edge-cases where differences are visually indistinguishable. - // This format is avoided under the following conditions: - // • A line starts with `"""` - // • A line starts with "..." - // • A line contains non-printable characters - // • Adjacent different lines differ only by whitespace - // - // For example: - // """ - // ... 
// 3 identical lines - // foo - // bar - // - baz - // + BAZ - // """ - isTripleQuoted := true - prevRemoveLines := map[string]bool{} - prevInsertLines := map[string]bool{} - var list2 textList - list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) - for _, r := range list { - if !r.Value.Equal(textEllipsis) { - line, _ := strconv.Unquote(string(r.Value.(textLine))) - line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support - normLine := strings.Map(func(r rune) rune { - if unicode.IsSpace(r) { - return -1 // drop whitespace to avoid visually indistinguishable output - } - return r - }, line) - isPrintable := func(r rune) bool { - return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable - } - isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" - switch r.Diff { - case diffRemoved: - isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine] - prevRemoveLines[normLine] = true - case diffInserted: - isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine] - prevInsertLines[normLine] = true - } - if !isTripleQuoted { - break - } - r.Value = textLine(line) - r.ElideComma = true - } - if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group - prevRemoveLines = map[string]bool{} - prevInsertLines = map[string]bool{} - } - list2 = append(list2, r) - } - if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 { - list2 = list2[:len(list2)-1] // elide single empty line at the end - } - list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true}) - if isTripleQuoted { - var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"} - switch t.Kind() { - case reflect.String: - if t != reflect.TypeOf(string("")) { - out = opts.FormatType(t, out) - } - case reflect.Slice: - // Always emit type for slices since the triple-quote syntax - // looks like a string (not a slice). - opts = opts.WithTypeMode(emitType) - out = opts.FormatType(t, out) - } - return out - } - - // If the text appears to be single-lined text, - // then perform differencing in approximately fixed-sized chunks. - // The output is printed as quoted strings. - case isMostlyText: - list = opts.formatDiffSlice( - reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte", - func(v reflect.Value, d diffMode) textRecord { - s := formatString(v.String()) - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - - // If the text appears to be binary data, - // then perform differencing in approximately fixed-sized chunks. - // The output is inspired by hexdump. - case isBinary: - list = opts.formatDiffSlice( - reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", - func(v reflect.Value, d diffMode) textRecord { - var ss []string - for i := 0; i < v.Len(); i++ { - ss = append(ss, formatHex(v.Index(i).Uint())) - } - s := strings.Join(ss, ", ") - comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) - return textRecord{Diff: d, Value: textLine(s), Comment: comment} - }, - ) - - // For all other slices of primitive types, - // then perform differencing in approximately fixed-sized chunks. - // The size of each chunk depends on the width of the element kind. 
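One of the triple-quote guards above normalizes each removed or inserted line by dropping all whitespace; if a removed and an inserted line normalize to the same string, their difference would be visually indistinguishable in triple-quote form, so that form is abandoned. A standalone sketch of the normalization, assuming a hypothetical `normLine` helper:

```go
package main

import (
	"fmt"
	"strings"
	"unicode"
)

// normLine drops every whitespace rune, mirroring the strings.Map call
// above: if a removed and an inserted line normalize to the same string,
// their difference is pure whitespace and the triple-quote form would
// render them visually identical.
func normLine(line string) string {
	return strings.Map(func(r rune) rune {
		if unicode.IsSpace(r) {
			return -1 // drop whitespace
		}
		return r
	}, line)
}

func main() {
	removed := "foo \tbar"
	inserted := "foo bar"
	fmt.Println(normLine(removed) == normLine(inserted)) // true: avoid triple-quote here
}
```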
- default: - var chunkSize int - if t.Elem().Kind() == reflect.Bool { - chunkSize = 16 - } else { - switch t.Elem().Bits() { - case 8: - chunkSize = 16 - case 16: - chunkSize = 12 - case 32: - chunkSize = 8 - default: - chunkSize = 8 - } - } - list = opts.formatDiffSlice( - vx, vy, chunkSize, t.Elem().Kind().String(), - func(v reflect.Value, d diffMode) textRecord { - var ss []string - for i := 0; i < v.Len(); i++ { - switch t.Elem().Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - ss = append(ss, fmt.Sprint(v.Index(i).Int())) - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - ss = append(ss, fmt.Sprint(v.Index(i).Uint())) - case reflect.Uint8, reflect.Uintptr: - ss = append(ss, formatHex(v.Index(i).Uint())) - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: - ss = append(ss, fmt.Sprint(v.Index(i).Interface())) - } - } - s := strings.Join(ss, ", ") - return textRecord{Diff: d, Value: textLine(s)} - }, - ) - } - - // Wrap the output with appropriate type information. - var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"} - if !isMostlyText { - // The "{...}" byte-sequence literal is not valid Go syntax for strings. - // Emit the type for extra clarity (e.g. "string{...}"). - if t.Kind() == reflect.String { - opts = opts.WithTypeMode(emitType) - } - return opts.FormatType(t, out) - } - switch t.Kind() { - case reflect.String: - out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf(string("")) { - out = opts.FormatType(t, out) - } - case reflect.Slice: - out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)} - if t != reflect.TypeOf([]byte(nil)) { - out = opts.FormatType(t, out) - } - } - return out -} - -// formatASCII formats s as an ASCII string. -// This is useful for printing binary strings in a semi-legible way. -func formatASCII(s string) string { - b := bytes.Repeat([]byte{'.'}, len(s)) - for i := 0; i < len(s); i++ { - if ' ' <= s[i] && s[i] <= '~' { - b[i] = s[i] - } - } - return string(b) -} - -func (opts formatOptions) formatDiffSlice( - vx, vy reflect.Value, chunkSize int, name string, - makeRec func(reflect.Value, diffMode) textRecord, -) (list textList) { - eq := func(ix, iy int) bool { - return vx.Index(ix).Interface() == vy.Index(iy).Interface() - } - es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result { - return diff.BoolResult(eq(ix, iy)) - }) - - appendChunks := func(v reflect.Value, d diffMode) int { - n0 := v.Len() - for v.Len() > 0 { - n := chunkSize - if n > v.Len() { - n = v.Len() - } - list = append(list, makeRec(v.Slice(0, n), d)) - v = v.Slice(n, v.Len()) - } - return n0 - v.Len() - } - - var numDiffs int - maxLen := -1 - if opts.LimitVerbosity { - maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc... - opts.VerbosityLevel-- - } - - groups := coalesceAdjacentEdits(name, es) - groups = coalesceInterveningIdentical(groups, chunkSize/4) - groups = cleanupSurroundingIdentical(groups, eq) - maxGroup := diffStats{Name: name} - for i, ds := range groups { - if maxLen >= 0 && numDiffs >= maxLen { - maxGroup = maxGroup.Append(ds) - continue - } - - // Print equal. - if ds.NumDiff() == 0 { - // Compute the number of leading and trailing equal bytes to print. 
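The `chunkSize` switch above keeps printed rows at a roughly constant width: the wider the element kind, the fewer elements per row. A sketch of the same selection as a standalone function, under a hypothetical `chunkSizeFor` name:

```go
package main

import (
	"fmt"
	"reflect"
)

// chunkSizeFor mirrors the selection above: wider element kinds get
// fewer elements per printed row so rows stay roughly the same width.
// Bool is handled first because reflect.Type.Bits panics on it.
func chunkSizeFor(elem reflect.Type) int {
	if elem.Kind() == reflect.Bool {
		return 16
	}
	switch elem.Bits() {
	case 8:
		return 16
	case 16:
		return 12
	case 32:
		return 8
	default:
		return 8
	}
}

func main() {
	fmt.Println(chunkSizeFor(reflect.TypeOf(uint8(0)))) // 16
	fmt.Println(chunkSizeFor(reflect.TypeOf(int16(0)))) // 12
	fmt.Println(chunkSizeFor(reflect.TypeOf(int64(0)))) // 8
}
```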
- var numLo, numHi int - numEqual := ds.NumIgnored + ds.NumIdentical - for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { - numLo++ - } - for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { - numHi++ - } - if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { - numHi = numEqual - numLo // Avoid pointless coalescing of single equal row - } - - // Print the equal bytes. - appendChunks(vx.Slice(0, numLo), diffIdentical) - if numEqual > numLo+numHi { - ds.NumIdentical -= numLo + numHi - list.AppendEllipsis(ds) - } - appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) - vx = vx.Slice(numEqual, vx.Len()) - vy = vy.Slice(numEqual, vy.Len()) - continue - } - - // Print unequal. - len0 := len(list) - nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) - vx = vx.Slice(nx, vx.Len()) - ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) - vy = vy.Slice(ny, vy.Len()) - numDiffs += len(list) - len0 - } - if maxGroup.IsZero() { - assert(vx.Len() == 0 && vy.Len() == 0) - } else { - list.AppendEllipsis(maxGroup) - } - return list -} - -// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent -// equal or unequal counts. -// -// Example: -// -// Input: "..XXY...Y" -// Output: [ -// {NumIdentical: 2}, -// {NumRemoved: 2, NumInserted 1}, -// {NumIdentical: 3}, -// {NumInserted: 1}, -// ] -// -func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { - var prevMode byte - lastStats := func(mode byte) *diffStats { - if prevMode != mode { - groups = append(groups, diffStats{Name: name}) - prevMode = mode - } - return &groups[len(groups)-1] - } - for _, e := range es { - switch e { - case diff.Identity: - lastStats('=').NumIdentical++ - case diff.UniqueX: - lastStats('!').NumRemoved++ - case diff.UniqueY: - lastStats('!').NumInserted++ - case diff.Modified: - lastStats('!').NumModified++ - } - } - return groups -} - -// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) -// equal groups into adjacent unequal groups that currently result in a -// dual inserted/removed printout. This acts as a high-pass filter to smooth -// out high-frequency changes within the windowSize. 
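The `coalesceAdjacentEdits` example in the comment above can be checked with a pared-down analog that reads an edit string directly instead of a `diff.EditScript` (the `coalesce` and `stats` names here are hypothetical):

```go
package main

import "fmt"

// stats is a pared-down stand-in for the diffStats groups built by
// coalesceAdjacentEdits above.
type stats struct{ Identical, Removed, Inserted int }

// coalesce groups an edit string such as "..XXY...Y" ('.' identical,
// 'X' removed, 'Y' inserted) into alternating equal/unequal runs,
// the same shape as the documented example above.
func coalesce(es string) (groups []stats) {
	var prevMode byte
	last := func(mode byte) *stats {
		if prevMode != mode {
			groups = append(groups, stats{})
			prevMode = mode
		}
		return &groups[len(groups)-1]
	}
	for i := 0; i < len(es); i++ {
		switch es[i] {
		case '.':
			last('=').Identical++
		case 'X':
			last('!').Removed++
		case 'Y':
			last('!').Inserted++
		}
	}
	return groups
}

func main() {
	// Prints four groups: {2 identical}, {2 removed, 1 inserted},
	// {3 identical}, {1 inserted}, matching the documented grouping.
	fmt.Printf("%+v\n", coalesce("..XXY...Y"))
}
```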
-// -// Example: -// -// WindowSize: 16, -// Input: [ -// {NumIdentical: 61}, // group 0 -// {NumRemoved: 3, NumInserted: 1}, // group 1 -// {NumIdentical: 6}, // ├── coalesce -// {NumInserted: 2}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 9}, // └── coalesce -// {NumIdentical: 64}, // group 2 -// {NumRemoved: 3, NumInserted: 1}, // group 3 -// {NumIdentical: 6}, // ├── coalesce -// {NumInserted: 2}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 7}, // ├── coalesce -// {NumIdentical: 1}, // ├── coalesce -// {NumRemoved: 2}, // └── coalesce -// {NumIdentical: 63}, // group 4 -// ] -// Output: [ -// {NumIdentical: 61}, -// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, -// {NumIdentical: 64}, -// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3}, -// {NumIdentical: 63}, -// ] -// -func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { - groups, groupsOrig := groups[:0], groups - for i, ds := range groupsOrig { - if len(groups) >= 2 && ds.NumDiff() > 0 { - prev := &groups[len(groups)-2] // Unequal group - curr := &groups[len(groups)-1] // Equal group - next := &groupsOrig[i] // Unequal group - hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 - hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 - if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { - *prev = prev.Append(*curr).Append(*next) - groups = groups[:len(groups)-1] // Truncate off equal group - continue - } - } - groups = append(groups, ds) - } - return groups -} - -// cleanupSurroundingIdentical scans through all unequal groups, and -// moves any leading sequence of equal elements to the preceding equal group and -// moves and trailing sequence of equal elements to the succeeding equal group. -// -// This is necessary since coalesceInterveningIdentical may coalesce edit groups -// together such that leading/trailing spans of equal elements becomes possible. -// Note that this can occur even with an optimal diffing algorithm. -// -// Example: -// -// Input: [ -// {NumIdentical: 61}, -// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements -// {NumIdentical: 67}, -// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements -// {NumIdentical: 54}, -// ] -// Output: [ -// {NumIdentical: 64}, // incremented by 3 -// {NumRemoved: 9}, -// {NumIdentical: 67}, -// {NumRemoved: 9}, -// {NumIdentical: 64}, // incremented by 10 -// ] -// -func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats { - var ix, iy int // indexes into sequence x and y - for i, ds := range groups { - // Handle equal group. - if ds.NumDiff() == 0 { - ix += ds.NumIdentical - iy += ds.NumIdentical - continue - } - - // Handle unequal group. - nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified - ny := ds.NumIdentical + ds.NumInserted + ds.NumModified - var numLeadingIdentical, numTrailingIdentical int - for i := 0; i < nx && i < ny && eq(ix+i, iy+i); i++ { - numLeadingIdentical++ - } - for i := 0; i < nx && i < ny && eq(ix+nx-1-i, iy+ny-1-i); i++ { - numTrailingIdentical++ - } - if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 { - if numLeadingIdentical > 0 { - // Remove leading identical span from this group and - // insert it into the preceding group. 
- if i-1 >= 0 { - groups[i-1].NumIdentical += numLeadingIdentical - } else { - // No preceding group exists, so prepend a new group, - // but do so after we finish iterating over all groups. - defer func() { - groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...) - }() - } - // Increment indexes since the preceding group would have handled this. - ix += numLeadingIdentical - iy += numLeadingIdentical - } - if numTrailingIdentical > 0 { - // Remove trailing identical span from this group and - // insert it into the succeeding group. - if i+1 < len(groups) { - groups[i+1].NumIdentical += numTrailingIdentical - } else { - // No succeeding group exists, so append a new group, - // but do so after we finish iterating over all groups. - defer func() { - groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical}) - }() - } - // Do not increment indexes since the succeeding group will handle this. - } - - // Update this group since some identical elements were removed. - nx -= numIdentical - ny -= numIdentical - groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny} - } - ix += nx - iy += ny - } - return groups -} diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_text.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_text.go deleted file mode 100644 index 0fd46d7ffb6..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_text.go +++ /dev/null @@ -1,431 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import ( - "bytes" - "fmt" - "math/rand" - "strings" - "time" - "unicode/utf8" - - "github.com/google/go-cmp/cmp/internal/flags" -) - -var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 - -const maxColumnLength = 80 - -type indentMode int - -func (n indentMode) appendIndent(b []byte, d diffMode) []byte { - // The output of Diff is documented as being unstable to provide future - // flexibility in changing the output for more humanly readable reports. - // This logic intentionally introduces instability to the exact output - // so that users can detect accidental reliance on stability early on, - // rather than much later when an actual change to the format occurs. - if flags.Deterministic || randBool { - // Use regular spaces (U+0020). - switch d { - case diffUnknown, diffIdentical: - b = append(b, " "...) - case diffRemoved: - b = append(b, "- "...) - case diffInserted: - b = append(b, "+ "...) - } - } else { - // Use non-breaking spaces (U+00a0). - switch d { - case diffUnknown, diffIdentical: - b = append(b, "  "...) - case diffRemoved: - b = append(b, "- "...) - case diffInserted: - b = append(b, "+ "...) - } - } - return repeatCount(n).appendChar(b, '\t') -} - -type repeatCount int - -func (n repeatCount) appendChar(b []byte, c byte) []byte { - for ; n > 0; n-- { - b = append(b, c) - } - return b -} - -// textNode is a simplified tree-based representation of structured text. -// Possible node types are textWrap, textList, or textLine. -type textNode interface { - // Len reports the length in bytes of a single-line version of the tree. - // Nested textRecord.Diff and textRecord.Comment fields are ignored. - Len() int - // Equal reports whether the two trees are structurally identical. - // Nested textRecord.Diff and textRecord.Comment fields are compared. 
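`cleanupSurroundingIdentical` above restores a simple invariant: an unequal group should not begin or end with elements that are in fact equal, since those belong to the neighboring identical groups. The core measurement can be sketched independently, here with a hypothetical `trimCommon` helper over byte slices:

```go
package main

import "fmt"

// trimCommon counts the leading and trailing elements that x and y
// share, the spans cleanupSurroundingIdentical moves out of an
// unequal group and into the adjacent identical groups.
func trimCommon(x, y []byte) (lead, trail int) {
	for lead < len(x) && lead < len(y) && x[lead] == y[lead] {
		lead++
	}
	for trail < len(x)-lead && trail < len(y)-lead &&
		x[len(x)-1-trail] == y[len(y)-1-trail] {
		trail++
	}
	return lead, trail
}

func main() {
	x := []byte("aaaXXXzz")
	y := []byte("aaaYYzz")
	lead, trail := trimCommon(x, y)
	fmt.Println(lead, trail) // 3 2: only "XXX" vs "YY" is truly unequal
}
```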
- Equal(textNode) bool - // String returns the string representation of the text tree. - // It is not guaranteed that len(x.String()) == x.Len(), - // nor that x.String() == y.String() implies that x.Equal(y). - String() string - - // formatCompactTo formats the contents of the tree as a single-line string - // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment - // fields are ignored. - // - // However, not all nodes in the tree should be collapsed as a single-line. - // If a node can be collapsed as a single-line, it is replaced by a textLine - // node. Since the top-level node cannot replace itself, this also returns - // the current node itself. - // - // This does not mutate the receiver. - formatCompactTo([]byte, diffMode) ([]byte, textNode) - // formatExpandedTo formats the contents of the tree as a multi-line string - // to the provided buffer. In order for column alignment to operate well, - // formatCompactTo must be called before calling formatExpandedTo. - formatExpandedTo([]byte, diffMode, indentMode) []byte -} - -// textWrap is a wrapper that concatenates a prefix and/or a suffix -// to the underlying node. -type textWrap struct { - Prefix string // e.g., "bytes.Buffer{" - Value textNode // textWrap | textList | textLine - Suffix string // e.g., "}" - Metadata interface{} // arbitrary metadata; has no effect on formatting -} - -func (s *textWrap) Len() int { - return len(s.Prefix) + s.Value.Len() + len(s.Suffix) -} -func (s1 *textWrap) Equal(s2 textNode) bool { - if s2, ok := s2.(*textWrap); ok { - return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix - } - return false -} -func (s *textWrap) String() string { - var d diffMode - var n indentMode - _, s2 := s.formatCompactTo(nil, d) - b := n.appendIndent(nil, d) // Leading indent - b = s2.formatExpandedTo(b, d, n) // Main body - b = append(b, '\n') // Trailing newline - return string(b) -} -func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - n0 := len(b) // Original buffer length - b = append(b, s.Prefix...) - b, s.Value = s.Value.formatCompactTo(b, d) - b = append(b, s.Suffix...) - if _, ok := s.Value.(textLine); ok { - return b, textLine(b[n0:]) - } - return b, s -} -func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { - b = append(b, s.Prefix...) - b = s.Value.formatExpandedTo(b, d, n) - b = append(b, s.Suffix...) - return b -} - -// textList is a comma-separated list of textWrap or textLine nodes. -// The list may be formatted as multi-lines or single-line at the discretion -// of the textList.formatCompactTo method. -type textList []textRecord -type textRecord struct { - Diff diffMode // e.g., 0 or '-' or '+' - Key string // e.g., "MyField" - Value textNode // textWrap | textLine - ElideComma bool // avoid trailing comma - Comment fmt.Stringer // e.g., "6 identical fields" -} - -// AppendEllipsis appends a new ellipsis node to the list if none already -// exists at the end. If cs is non-zero it coalesces the statistics with the -// previous diffStats. 
-func (s *textList) AppendEllipsis(ds diffStats) { - hasStats := !ds.IsZero() - if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { - if hasStats { - *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds}) - } else { - *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true}) - } - return - } - if hasStats { - (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) - } -} - -func (s textList) Len() (n int) { - for i, r := range s { - n += len(r.Key) - if r.Key != "" { - n += len(": ") - } - n += r.Value.Len() - if i < len(s)-1 { - n += len(", ") - } - } - return n -} - -func (s1 textList) Equal(s2 textNode) bool { - if s2, ok := s2.(textList); ok { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - r1, r2 := s1[i], s2[i] - if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { - return false - } - } - return true - } - return false -} - -func (s textList) String() string { - return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String() -} - -func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - s = append(textList(nil), s...) // Avoid mutating original - - // Determine whether we can collapse this list as a single line. - n0 := len(b) // Original buffer length - var multiLine bool - for i, r := range s { - if r.Diff == diffInserted || r.Diff == diffRemoved { - multiLine = true - } - b = append(b, r.Key...) - if r.Key != "" { - b = append(b, ": "...) - } - b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) - if _, ok := s[i].Value.(textLine); !ok { - multiLine = true - } - if r.Comment != nil { - multiLine = true - } - if i < len(s)-1 { - b = append(b, ", "...) - } - } - // Force multi-lined output when printing a removed/inserted node that - // is sufficiently long. - if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength { - multiLine = true - } - if !multiLine { - return b, textLine(b[n0:]) - } - return b, s -} - -func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { - alignKeyLens := s.alignLens( - func(r textRecord) bool { - _, isLine := r.Value.(textLine) - return r.Key == "" || !isLine - }, - func(r textRecord) int { return utf8.RuneCountInString(r.Key) }, - ) - alignValueLens := s.alignLens( - func(r textRecord) bool { - _, isLine := r.Value.(textLine) - return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil - }, - func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) }, - ) - - // Format lists of simple lists in a batched form. - // If the list is sequence of only textLine values, - // then batch multiple values on a single line. - var isSimple bool - for _, r := range s { - _, isLine := r.Value.(textLine) - isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil - if !isSimple { - break - } - } - if isSimple { - n++ - var batch []byte - emitBatch := func() { - if len(batch) > 0 { - b = n.appendIndent(append(b, '\n'), d) - b = append(b, bytes.TrimRight(batch, " ")...) - batch = batch[:0] - } - } - for _, r := range s { - line := r.Value.(textLine) - if len(batch)+len(line)+len(", ") > maxColumnLength { - emitBatch() - } - batch = append(batch, line...) - batch = append(batch, ", "...) - } - emitBatch() - n-- - return n.appendIndent(append(b, '\n'), d) - } - - // Format the list as a multi-lined output. - n++ - for i, r := range s { - b = n.appendIndent(append(b, '\n'), d|r.Diff) - if r.Key != "" { - b = append(b, r.Key+": "...) 
- } - b = alignKeyLens[i].appendChar(b, ' ') - - b = r.Value.formatExpandedTo(b, d|r.Diff, n) - if !r.ElideComma { - b = append(b, ',') - } - b = alignValueLens[i].appendChar(b, ' ') - - if r.Comment != nil { - b = append(b, " // "+r.Comment.String()...) - } - } - n-- - - return n.appendIndent(append(b, '\n'), d) -} - -func (s textList) alignLens( - skipFunc func(textRecord) bool, - lenFunc func(textRecord) int, -) []repeatCount { - var startIdx, endIdx, maxLen int - lens := make([]repeatCount, len(s)) - for i, r := range s { - if skipFunc(r) { - for j := startIdx; j < endIdx && j < len(s); j++ { - lens[j] = repeatCount(maxLen - lenFunc(s[j])) - } - startIdx, endIdx, maxLen = i+1, i+1, 0 - } else { - if maxLen < lenFunc(r) { - maxLen = lenFunc(r) - } - endIdx = i + 1 - } - } - for j := startIdx; j < endIdx && j < len(s); j++ { - lens[j] = repeatCount(maxLen - lenFunc(s[j])) - } - return lens -} - -// textLine is a single-line segment of text and is always a leaf node -// in the textNode tree. -type textLine []byte - -var ( - textNil = textLine("nil") - textEllipsis = textLine("...") -) - -func (s textLine) Len() int { - return len(s) -} -func (s1 textLine) Equal(s2 textNode) bool { - if s2, ok := s2.(textLine); ok { - return bytes.Equal([]byte(s1), []byte(s2)) - } - return false -} -func (s textLine) String() string { - return string(s) -} -func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { - return append(b, s...), s -} -func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { - return append(b, s...) -} - -type diffStats struct { - Name string - NumIgnored int - NumIdentical int - NumRemoved int - NumInserted int - NumModified int -} - -func (s diffStats) IsZero() bool { - s.Name = "" - return s == diffStats{} -} - -func (s diffStats) NumDiff() int { - return s.NumRemoved + s.NumInserted + s.NumModified -} - -func (s diffStats) Append(ds diffStats) diffStats { - assert(s.Name == ds.Name) - s.NumIgnored += ds.NumIgnored - s.NumIdentical += ds.NumIdentical - s.NumRemoved += ds.NumRemoved - s.NumInserted += ds.NumInserted - s.NumModified += ds.NumModified - return s -} - -// String prints a humanly-readable summary of coalesced records. -// -// Example: -// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields" -func (s diffStats) String() string { - var ss []string - var sum int - labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"} - counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified} - for i, n := range counts { - if n > 0 { - ss = append(ss, fmt.Sprintf("%d %v", n, labels[i])) - } - sum += n - } - - // Pluralize the name (adjusting for some obscure English grammar rules). - name := s.Name - if sum > 1 { - name += "s" - if strings.HasSuffix(name, "ys") { - name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries" - } - } - - // Format the list according to English grammar (with Oxford comma). 
- switch n := len(ss); n { - case 0: - return "" - case 1, 2: - return strings.Join(ss, " and ") + " " + name - default: - return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name - } -} - -type commentString string - -func (s commentString) String() string { return string(s) } diff --git a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_value.go b/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_value.go deleted file mode 100644 index 668d470fd83..00000000000 --- a/ui/wasm/vendor/github.com/google/go-cmp/cmp/report_value.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019, The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package cmp - -import "reflect" - -// valueNode represents a single node within a report, which is a -// structured representation of the value tree, containing information -// regarding which nodes are equal or not. -type valueNode struct { - parent *valueNode - - Type reflect.Type - ValueX reflect.Value - ValueY reflect.Value - - // NumSame is the number of leaf nodes that are equal. - // All descendants are equal only if NumDiff is 0. - NumSame int - // NumDiff is the number of leaf nodes that are not equal. - NumDiff int - // NumIgnored is the number of leaf nodes that are ignored. - NumIgnored int - // NumCompared is the number of leaf nodes that were compared - // using an Equal method or Comparer function. - NumCompared int - // NumTransformed is the number of non-leaf nodes that were transformed. - NumTransformed int - // NumChildren is the number of transitive descendants of this node. - // This counts from zero; thus, leaf nodes have no descendants. - NumChildren int - // MaxDepth is the maximum depth of the tree. This counts from zero; - // thus, leaf nodes have a depth of zero. - MaxDepth int - - // Records is a list of struct fields, slice elements, or map entries. - Records []reportRecord // If populated, implies Value is not populated - - // Value is the result of a transformation, pointer indirect, of - // type assertion. - Value *valueNode // If populated, implies Records is not populated - - // TransformerName is the name of the transformer. 
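The pluralization and Oxford-comma joining in the deleted `String` method above can be exercised in isolation; this sketch uses a hypothetical `summarize` helper with the same rules:

```go
package main

import (
	"fmt"
	"strings"
)

// summarize mimics diffStats.String above: pluralize the unit name when
// the total exceeds one (with the "entrys" -> "entries" fixup) and join
// the parts with an Oxford comma once there are three or more.
func summarize(name string, parts []string, sum int) string {
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies"
		}
	}
	switch n := len(parts); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(parts, " and ") + " " + name
	default:
		return strings.Join(parts[:n-1], ", ") + ", and " + parts[n-1] + " " + name
	}
}

func main() {
	fmt.Println(summarize("field", []string{"5 ignored"}, 5))               // 5 ignored fields
	fmt.Println(summarize("entry", []string{"1 removed", "2 inserted"}, 3)) // 1 removed and 2 inserted entries
	fmt.Println(summarize("line", []string{"1 removed", "2 inserted", "3 modified"}, 6))
}
```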
- TransformerName string // If non-empty, implies Value is populated -} -type reportRecord struct { - Key reflect.Value // Invalid for slice element - Value *valueNode -} - -func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { - vx, vy := ps.Values() - child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} - switch s := ps.(type) { - case StructField: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) - case SliceIndex: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Value: child}) - case MapIndex: - assert(parent.Value == nil) - parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) - case Indirect: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - case TypeAssertion: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - case Transform: - assert(parent.Value == nil && parent.Records == nil) - parent.Value = child - parent.TransformerName = s.Name() - parent.NumTransformed++ - default: - assert(parent == nil) // Must be the root step - } - return child -} - -func (r *valueNode) Report(rs Result) { - assert(r.MaxDepth == 0) // May only be called on leaf nodes - - if rs.ByIgnore() { - r.NumIgnored++ - } else { - if rs.Equal() { - r.NumSame++ - } else { - r.NumDiff++ - } - } - assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) - - if rs.ByMethod() { - r.NumCompared++ - } - if rs.ByFunc() { - r.NumCompared++ - } - assert(r.NumCompared <= 1) -} - -func (child *valueNode) PopStep() (parent *valueNode) { - if child.parent == nil { - return nil - } - parent = child.parent - parent.NumSame += child.NumSame - parent.NumDiff += child.NumDiff - parent.NumIgnored += child.NumIgnored - parent.NumCompared += child.NumCompared - parent.NumTransformed += child.NumTransformed - parent.NumChildren += child.NumChildren + 1 - if parent.MaxDepth < child.MaxDepth+1 { - parent.MaxDepth = child.MaxDepth + 1 - } - return parent -} diff --git a/ui/wasm/vendor/github.com/google/gofuzz/.travis.yml b/ui/wasm/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index 061d72ae079..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - master - -script: - - go test -cover diff --git a/ui/wasm/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/ui/wasm/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 97c1b34fd5e..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. 
- -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/ui/wasm/vendor/github.com/google/gofuzz/LICENSE b/ui/wasm/vendor/github.com/google/gofuzz/LICENSE deleted file mode 100644 index d6456956733..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/ui/wasm/vendor/github.com/google/gofuzz/README.md b/ui/wasm/vendor/github.com/google/gofuzz/README.md deleted file mode 100644 index b503aae7d71..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/README.md +++ /dev/null @@ -1,89 +0,0 @@ -gofuzz -====== - -gofuzz is a library for populating go objects with random values. - -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) - -This is useful for testing: - -* Do your project's objects really serialize/unserialize correctly in all cases? -* Is there an incorrectly formatted object that will cause your project to panic? - -Import with ```import "github.com/google/gofuzz"``` - -You can use it on single variables: -```go -f := fuzz.New() -var myInt int -f.Fuzz(&myInt) // myInt gets a random value. -``` - -You can use it on maps: -```go -f := fuzz.New().NilChance(0).NumElements(1, 1) -var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. -``` - -Customize the chance of getting a nil pointer: -```go -f := fuzz.New().NilChance(.5) -var fancyStruct struct { - A, B, C, D *string -} -f.Fuzz(&fancyStruct) // About half the pointers should be set. -``` - -You can even customize the randomization completely if needed: -```go -type MyEnum string -const ( - A MyEnum = "A" - B MyEnum = "B" -) -type MyInfo struct { - Type MyEnum - AInfo *string - BInfo *string -} - -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { - switch c.Intn(2) { - case 0: - e.Type = A - c.Fuzz(&e.AInfo) - case 1: - e.Type = B - c.Fuzz(&e.BInfo) - } - }, -) - -var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. -``` - -See more examples in ```example_test.go```. - -You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. -go-fuzz provides the user a byte-slice, which should be converted to different inputs -for the tested function. This library can help convert the byte slice. Consider for -example a fuzz test for a the function `mypackage.MyFunc` that takes an int arguments: -```go -// +build gofuzz -package mypackage - -import fuzz "github.com/google/gofuzz" - -func Fuzz(data []byte) int { - var i int - fuzz.NewFromGoFuzz(data).Fuzz(&i) - MyFunc(i) - return 0 -} -``` - -Happy testing! diff --git a/ui/wasm/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/ui/wasm/vendor/github.com/google/gofuzz/bytesource/bytesource.go deleted file mode 100644 index 5bb36594969..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/bytesource/bytesource.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package bytesource provides a rand.Source64 that is determined by a slice of bytes. 
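The README's promise of a constant translation from input bytes to fuzzed objects is easy to check with the public API; in this sketch the corpus bytes are made up:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// The same input bytes always yield the same fuzzed values, so a
// go-fuzz corpus entry reproduces the exact same test case.
func main() {
	data := []byte("some fuzz corpus entry")

	var a, b int
	fuzz.NewFromGoFuzz(data).Fuzz(&a)
	fuzz.NewFromGoFuzz(data).Fuzz(&b)
	fmt.Println(a == b) // true: identical bytes, identical value
}
```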
-package bytesource - -import ( - "bytes" - "encoding/binary" - "io" - "math/rand" -) - -// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are -// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a -// fallback pseudo random source is created in case more random numbers are required. -// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly. -type ByteSource struct { - *bytes.Reader - fallback rand.Source -} - -// New returns a new ByteSource from a given slice of bytes. -func New(input []byte) *ByteSource { - s := &ByteSource{ - Reader: bytes.NewReader(input), - fallback: rand.NewSource(0), - } - if len(input) > 0 { - s.fallback = rand.NewSource(int64(s.consumeUint64())) - } - return s -} - -func (s *ByteSource) Uint64() uint64 { - // Return from input if it was not exhausted. - if s.Len() > 0 { - return s.consumeUint64() - } - - // Input was exhausted, return random number from fallback (in this case fallback should not be - // nil). Try first having a Uint64 output (Should work in current rand implementation), - // otherwise return a conversion of Int63. - if s64, ok := s.fallback.(rand.Source64); ok { - return s64.Uint64() - } - return uint64(s.fallback.Int63()) -} - -func (s *ByteSource) Int63() int64 { - return int64(s.Uint64() >> 1) -} - -func (s *ByteSource) Seed(seed int64) { - s.fallback = rand.NewSource(seed) - s.Reader = bytes.NewReader(nil) -} - -// consumeUint64 reads 8 bytes from the input and convert them to a uint64. It assumes that the the -// bytes reader is not empty. -func (s *ByteSource) consumeUint64() uint64 { - var bytes [8]byte - _, err := s.Read(bytes[:]) - if err != nil && err != io.EOF { - panic("failed reading source") // Should not happen. - } - return binary.BigEndian.Uint64(bytes[:]) -} diff --git a/ui/wasm/vendor/github.com/google/gofuzz/doc.go b/ui/wasm/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d4a64..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/ui/wasm/vendor/github.com/google/gofuzz/fuzz.go b/ui/wasm/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 761520a8cee..00000000000 --- a/ui/wasm/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,605 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "time" - - "github.com/google/gofuzz/bytesource" - "strings" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int - skipFieldPatterns []*regexp.Regexp -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. -func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// NewFromGoFuzz is a helper function that enables using gofuzz (this -// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous -// fuzzing. Essentially, it enables translating the fuzzing bytes from -// go-fuzz to any Go object using this library. -// -// This implementation promises a constant translation from a given slice of -// bytes to the fuzzed objects. This promise will remain over future -// versions of Go and of this library. -// -// Note: the returned Fuzzer should not be shared between multiple goroutines, -// as its deterministic output will no longer be available. -// -// Example: use go-fuzz to test the function `MyFunc(int)` in the package -// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: -// -// // +build gofuzz -// package mypacakge -// import fuzz "github.com/google/gofuzz" -// func Fuzz(data []byte) int { -// var i int -// fuzz.NewFromGoFuzz(data).Fuzz(&i) -// MyFunc(i) -// return 0 -// } -func NewFromGoFuzz(data []byte) *Fuzzer { - return New().RandSource(bytesource.New(data)) -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. 
-func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. -func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() >= f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Skip fields which match the supplied pattern. Call this multiple times if needed -// This is useful to skip XXX_ fields generated by protobuf -func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { - f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! 
-// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. -type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - skipField := false - fieldName := v.Type().Field(i).Name - for _, pattern := range fc.fuzzer.skipFieldPatterns { - if pattern.MatchString(fieldName) { - skipField = true - break - } - } - if !skipField { - fc.doFuzz(v.Field(i), 0) - } - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. 
- doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. - // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. 
- sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex(r.Float64(), r.Float64())) - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - return r.Int31()&(1<<30) == 0 -} - -type int63nPicker interface { - Int63n(int64) int64 -} - -// UnicodeRange describes a sequential range of unicode characters. -// Last must be numerically greater than First. -type UnicodeRange struct { - First, Last rune -} - -// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. -// To be useful, each range must have at least one character (First <= Last) and -// there must be at least one range. -type UnicodeRanges []UnicodeRange - -// choose returns a random unicode character from the given range, using the -// given randomness source. -func (ur UnicodeRange) choose(r int63nPicker) rune { - count := int64(ur.Last - ur.First + 1) - return ur.First + rune(r.Int63n(count)) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from the range ur. If there are no characters -// in the range (cr.Last < cr.First), this will panic. -func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { - ur.check() - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// check is a function that used to check whether the first of ur(UnicodeRange) -// is greater than the last one. -func (ur UnicodeRange) check() { - if ur.Last < ur.First { - panic("The last encoding must be greater than the first one.") - } -} - -// randString of UnicodeRange makes a random string up to 20 characters long. -// Each character is selected form ur(UnicodeRange). -func (ur UnicodeRange) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur.choose(r)) - } - return sb.String() -} - -// defaultUnicodeRanges sets a default unicode range when user do not set -// CustomStringFuzzFunc() but wants fuzz string. -var defaultUnicodeRanges = UnicodeRanges{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from one of the ranges of ur(UnicodeRanges). -// Each range has an equal probability of being chosen. 
If there are no ranges, -// or a selected range has no characters (.Last < .First), this will panic. -// Do not modify any of the ranges in ur after calling this function. -func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { - // Check unicode ranges slice is empty. - if len(ur) == 0 { - panic("UnicodeRanges is empty.") - } - // if not empty, each range should be checked. - for i := range ur { - ur[i].check() - } - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// randString of UnicodeRanges makes a random string up to 20 characters long. -// Each character is selected form one of the ranges of ur(UnicodeRanges), -// and each range has an equal probability of being chosen. -func (ur UnicodeRanges) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) - } - return sb.String() -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - return defaultUnicodeRanges.randString(r) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/LICENSE b/ui/wasm/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b1270ff0..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/README.md b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/README.md deleted file mode 100644 index ee9783d232f..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Compiler support code - -This directory contains compiler support code used by Gnostic and Gnostic -extensions. diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/context.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index 1bfe9612194..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - yaml "gopkg.in/yaml.v3" -) - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - Node *yaml.Node - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, node *yaml.Node, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, node *yaml.Node, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Node: node, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - name := context.Name - if context.Parent != nil { - name = context.Parent.Description() + "." + name - } - return name -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/error.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index 6f40515d6b6..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import "fmt" - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -func (err *Error) locationDescription() string { - if err.Context.Node != nil { - return fmt.Sprintf("[%d,%d] %s", err.Context.Node.Line, err.Context.Node.Column, err.Context.Description()) - } - return err.Context.Description() -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return err.Message - } - return err.locationDescription() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. -type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. 
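
The removed `compiler.Context` values form a parent chain that `Description` renders as a dotted path, which `Error` then prefixes to its message. A small sketch of how the two types fit together:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler" // vendored path removed by this diff
)

func main() {
	// Contexts chain through their Parent pointer; passing nil for the
	// yaml.Node is fine when no source location is available.
	root := compiler.NewContext("document", nil, nil)
	child := compiler.NewContext("components", nil, root)

	err := compiler.NewError(child, "schema is missing")
	fmt.Println(err.Error()) // "document.components schema is missing"
}
```
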
-func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/extensions.go deleted file mode 100644 index 20848a0a163..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/extensions.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - extensions "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v3" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { - if context == nil || context.ExtensionHandlers == nil { - return false, nil, nil - } - handled = false - for _, handler := range *(context.ExtensionHandlers) { - response, err = handler.handle(in, extensionName) - if response == nil { - continue - } else { - handled = true - break - } - } - return handled, response, err -} - -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - yamlData, _ := yaml.Marshal(in) - request := &extensions.ExtensionHandlerRequest{ - CompilerVersion: &extensions.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - Wrapper: &extensions.Wrapper{ - Version: "unknown", // TODO: set this to the type/version of spec being parsed. - Yaml: string(yamlData), - ExtensionName: extensionName, - }, - } - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - if err != nil { - return nil, err - } - response := &extensions.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil || !response.Handled { - return nil, err - } - if len(response.Errors) != 0 { - return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. 
Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) - } - return response.Value, nil - } - return nil, nil -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index 48f02f3950e..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "regexp" - "sort" - "strconv" - - "github.com/googleapis/gnostic/jsonschema" - "gopkg.in/yaml.v3" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a *yaml.Node if possible. -func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { - if in == nil { - return nil, false - } - return in, true -} - -// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. -func SortedKeysForMap(m *yaml.Node) []string { - keys := make([]string, 0) - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - keys = append(keys, m.Content[i].Value) - } - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. -func MapHasKey(m *yaml.Node, key string) bool { - if m == nil { - return false - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return true - } - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m *yaml.Node, key string) *yaml.Node { - if m == nil { - return nil - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return m.Content[i+1] - } - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// SequenceNodeForNode returns a node if it is a SequenceNode. -func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { - if node.Kind != yaml.SequenceNode { - return nil, false - } - return node, true -} - -// BoolForScalarNode returns the bool value of a node. -func BoolForScalarNode(node *yaml.Node) (bool, bool) { - if node == nil { - return false, false - } - if node.Kind == yaml.DocumentNode { - return BoolForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return false, false - } - if node.Tag != "!!bool" { - return false, false - } - v, err := strconv.ParseBool(node.Value) - if err != nil { - return false, false - } - return v, true -} - -// IntForScalarNode returns the integer value of a node. 
-func IntForScalarNode(node *yaml.Node) (int64, bool) { - if node == nil { - return 0, false - } - if node.Kind == yaml.DocumentNode { - return IntForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0, false - } - if node.Tag != "!!int" { - return 0, false - } - v, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return 0, false - } - return v, true -} - -// FloatForScalarNode returns the float value of a node. -func FloatForScalarNode(node *yaml.Node) (float64, bool) { - if node == nil { - return 0.0, false - } - if node.Kind == yaml.DocumentNode { - return FloatForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0.0, false - } - if (node.Tag != "!!int") && (node.Tag != "!!float") { - return 0.0, false - } - v, err := strconv.ParseFloat(node.Value, 64) - if err != nil { - return 0.0, false - } - return v, true -} - -// StringForScalarNode returns the string value of a node. -func StringForScalarNode(node *yaml.Node) (string, bool) { - if node == nil { - return "", false - } - if node.Kind == yaml.DocumentNode { - return StringForScalarNode(node.Content[0]) - } - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!int": - return node.Value, true - case "!!str": - return node.Value, true - case "!!timestamp": - return node.Value, true - case "!!null": - return "", true - default: - return "", false - } - default: - return "", false - } -} - -// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. -func StringArrayForSequenceNode(node *yaml.Node) []string { - stringArray := make([]string, 0) - for _, item := range node.Content { - v, ok := StringForScalarNode(item) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - if m == nil || m.Kind != yaml.MappingNode { - return invalidKeys - } - for i := 0; i < len(m.Content); i += 2 { - key := m.Content[i].Value - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - return invalidKeys -} - -// NewNullNode creates a new Null node. -func NewNullNode() *yaml.Node { - node := &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!null", - } - return node -} - -// NewMappingNode creates a new Mapping node. -func NewMappingNode() *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: make([]*yaml.Node, 0), - } -} - -// NewSequenceNode creates a new Sequence node. -func NewSequenceNode() *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - return node -} - -// NewScalarNodeForString creates a new node to hold a string. 
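
These helpers are the building blocks the generated gnostic code uses to walk parsed YAML. A minimal sketch of reading one field and validating required keys with them:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler" // vendored path removed by this diff
	yaml "gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	_ = yaml.Unmarshal([]byte("name: odo\nreplicas: 3\n"), &doc)
	m := doc.Content[0] // unwrap the DocumentNode to reach the MappingNode

	if node := compiler.MapValueForKey(m, "replicas"); node != nil {
		if n, ok := compiler.IntForScalarNode(node); ok {
			fmt.Println("replicas:", n) // replicas: 3
		}
	}
	fmt.Println(compiler.MissingKeysInMap(m, []string{"name", "kind"})) // [kind]
}
```
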
-func NewScalarNodeForString(s string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: s, - } -} - -// NewSequenceNodeForStringArray creates a new node to hold an array of strings. -func NewSequenceNodeForStringArray(strings []string) *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - for _, s := range strings { - node.Content = append(node.Content, NewScalarNodeForString(s)) - } - return node -} - -// NewScalarNodeForBool creates a new node to hold a bool. -func NewScalarNodeForBool(b bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", b), - } -} - -// NewScalarNodeForFloat creates a new node to hold a float. -func NewScalarNodeForFloat(f float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%g", f), - } -} - -// NewScalarNodeForInt creates a new node to hold an integer. -func NewScalarNodeForInt(i int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", i), - } -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. -func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} - -// Description returns a human-readable represention of an item. -func Description(item interface{}) string { - value, ok := item.(*yaml.Node) - if ok { - return jsonschema.Render(value) - } - return fmt.Sprintf("%+v", item) -} - -// Display returns a description of a node for use in error messages. -func Display(node *yaml.Node) string { - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!str": - return fmt.Sprintf("%s (string)", node.Value) - } - } - return fmt.Sprintf("%+v (%T)", node, node) -} - -// Marshal creates a yaml version of a structure in our preferred style -func Marshal(in *yaml.Node) []byte { - clearStyle(in) - //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) - bytes, _ := yaml.Marshal(in) - - return bytes -} - -func clearStyle(node *yaml.Node) { - node.Style = 0 - for _, c := range node.Content { - clearStyle(c) - } -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/main.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index ce9fcc456cc..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/reader.go b/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index be0e8b40c8c..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" - "sync" - - yaml "gopkg.in/yaml.v3" -) - -var verboseReader = false - -var fileCache map[string][]byte -var infoCache map[string]*yaml.Node - -var fileCacheEnable = true -var infoCacheEnable = true - -// These locks are used to synchronize accesses to the fileCache and infoCache -// maps (above). They are global state and can throw thread-related errors -// when modified from separate goroutines. The general strategy is to protect -// all public functions in this file with mutex Lock() calls. As a result, to -// avoid deadlock, these public functions should not call other public -// functions, so some public functions have private equivalents. -// In the future, we might consider replacing the maps with sync.Map and -// eliminating these mutexes. -var fileCacheMutex sync.Mutex -var infoCacheMutex sync.Mutex - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]*yaml.Node, 0) - } -} - -// EnableFileCache turns on file caching. -func EnableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = true -} - -// EnableInfoCache turns on parsed info caching. -func EnableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = true -} - -// DisableFileCache turns off file caching. -func DisableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = false -} - -// DisableInfoCache turns off parsed info caching. -func DisableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = false -} - -// RemoveFromFileCache removes an entry from the file cache. -func RemoveFromFileCache(fileurl string) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - if !fileCacheEnable { - return - } - initializeFileCache() - delete(fileCache, fileurl) -} - -// RemoveFromInfoCache removes an entry from the info cache. 
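
The cache switches above are plain package-level toggles guarded by the two mutexes. Evicting one entry or disabling caching entirely looks like this (the URL is purely illustrative):

```go
package main

import "github.com/googleapis/gnostic/compiler" // vendored path removed by this diff

func main() {
	// Evict a single stale entry while caching stays on...
	compiler.RemoveFromFileCache("https://example.com/openapi.yaml") // hypothetical URL

	// ...or turn the process-global caches off entirely. Note the removal
	// above must happen first: RemoveFromFileCache is a no-op once disabled.
	compiler.DisableFileCache()
	compiler.DisableInfoCache()
}
```
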
-func RemoveFromInfoCache(filename string) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if !infoCacheEnable { - return - } - initializeInfoCache() - delete(infoCache, filename) -} - -// GetInfoCache returns the info cache map. -func GetInfoCache() map[string]*yaml.Node { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if infoCache == nil { - initializeInfoCache() - } - return infoCache -} - -// ClearFileCache clears the file cache. -func ClearFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCache = make(map[string][]byte, 0) -} - -// ClearInfoCache clears the info cache. -func ClearInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCache = make(map[string]*yaml.Node) -} - -// ClearCaches clears all caches. -func ClearCaches() { - ClearFileCache() - ClearInfoCache() -} - -// FetchFile gets a specified file from the local filesystem or a remote location. -func FetchFile(fileurl string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return fetchFile(fileurl) -} - -func fetchFile(fileurl string) ([]byte, error) { - var bytes []byte - initializeFileCache() - if fileCacheEnable { - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } - } - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != 200 { - return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) - } - bytes, err = ioutil.ReadAll(response.Body) - if fileCacheEnable && err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return readBytesForFile(filename) -} - -func readBytesForFile(filename string) ([]byte, error) { - // is the filename a url? - fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := fetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a *yaml.Node. -func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - return readInfoFromBytes(filename, bytes) -} - -func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - initializeInfoCache() - if infoCacheEnable { - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - } - var info yaml.Node - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = &info - } - return &info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
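
`ReadBytesForFile` accepts either a local path or a URL (it inspects the scheme), and `ReadInfoFromBytes` parses the bytes into a `*yaml.Node`, caching by filename. A sketch of the pair, reading the repo's spec file (assumed to exist in the working directory):

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/compiler" // vendored path removed by this diff
)

func main() {
	data, err := compiler.ReadBytesForFile("ododevapispec.yaml") // no scheme, so read locally
	if err != nil {
		log.Fatal(err)
	}
	info, err := compiler.ReadInfoFromBytes("ododevapispec.yaml", data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("yaml node kind:", info.Kind)
}
```
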
-func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - initializeInfoCache() - if infoCacheEnable { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - } - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = parts[0] - if _, err := url.ParseRequestURI(parts[0]); err != nil { - // It is not an URL, so the file is local - filename = basedir + parts[0] - } - } else { - filename = basefile - } - bytes, err := readBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := readInfoFromBytes(filename, bytes) - if info != nil && info.Kind == yaml.DocumentNode { - info = info.Content[0] - } - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if info == nil { - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m := info - if true { - found := false - for i := 0; i < len(m.Content); i += 2 { - if m.Content[i].Value == key { - info = m.Content[i+1] - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - if infoCacheEnable { - infoCache[ref] = info - } - return info, nil -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/README.md b/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/README.md deleted file mode 100644 index 4b5d63e5856..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Extensions - -**Extension Support is experimental.** - -This directory contains support code for building Gnostic extensio handlers and -associated examples. - -Extension handlers can be used to compile vendor or specification extensions -into protocol buffer structures. - -Like plugins, extension handlers are built as separate executables. Extension -bodies are written to extension handlers as serialized -ExtensionHandlerRequests. diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index 5aab58ebfbe..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,461 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.5 -// source: extensions/extension.proto - -package gnostic_extension_v1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// The version number of Gnostic. -type Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` -} - -func (x *Version) Reset() { - *x = Version{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Version) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Version) ProtoMessage() {} - -func (x *Version) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Version.ProtoReflect.Descriptor instead. -func (*Version) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{0} -} - -func (x *Version) GetMajor() int32 { - if x != nil { - return x.Major - } - return 0 -} - -func (x *Version) GetMinor() int32 { - if x != nil { - return x.Minor - } - return 0 -} - -func (x *Version) GetPatch() int32 { - if x != nil { - return x.Patch - } - return 0 -} - -func (x *Version) GetSuffix() string { - if x != nil { - return x.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The extension to process. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of Gnostic. 
- CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` -} - -func (x *ExtensionHandlerRequest) Reset() { - *x = ExtensionHandlerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerRequest) ProtoMessage() {} - -func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{1} -} - -func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if x != nil { - return x.Wrapper - } - return nil -} - -func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if x != nil { - return x.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` - // text output - Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *ExtensionHandlerResponse) Reset() { - *x = ExtensionHandlerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerResponse) ProtoMessage() {} - -func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
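
The wire contract implied by these generated types (and by `CallExtension` further up) is that gnostic writes a serialized `ExtensionHandlerRequest` to the handler binary's stdin and expects a serialized `ExtensionHandlerResponse` on stdout, with a zero exit code even when `Errors` is populated. A minimal handler skeleton under that contract, which simply declines every extension:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	extensions "github.com/googleapis/gnostic/extensions" // vendored path removed by this diff
)

func main() {
	// Request arrives serialized on stdin.
	data, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatal(err) // a broken pipe is a handler bug, not an extension error
	}
	request := &extensions.ExtensionHandlerRequest{}
	if err := proto.Unmarshal(data, request); err != nil {
		log.Fatal(err)
	}

	// Handled=false tells gnostic to try the next registered handler.
	response := &extensions.ExtensionHandlerResponse{Handled: false}
	out, err := proto.Marshal(response)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(out)
}
```
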
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{2} -} - -func (x *ExtensionHandlerResponse) GetHandled() bool { - if x != nil { - return x.Handled - } - return false -} - -func (x *ExtensionHandlerResponse) GetErrors() []string { - if x != nil { - return x.Errors - } - return nil -} - -func (x *ExtensionHandlerResponse) GetValue() *anypb.Any { - if x != nil { - return x.Value - } - return nil -} - -type Wrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // version of the OpenAPI specification in which this extension was written. - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension. - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // YAML-formatted extension value. - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Wrapper) Reset() { - *x = Wrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Wrapper) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Wrapper) ProtoMessage() {} - -func (x *Wrapper) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. -func (*Wrapper) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{3} -} - -func (x *Wrapper) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Wrapper) GetExtensionName() string { - if x != nil { - return x.ExtensionName - } - return "" -} - -func (x *Wrapper) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -var File_extensions_extension_proto protoreflect.FileDescriptor - -var file_extensions_extension_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, - 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, - 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, - 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, - 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, - 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, - 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_extensions_extension_proto_rawDescOnce sync.Once - file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc -) - -func file_extensions_extension_proto_rawDescGZIP() []byte { - file_extensions_extension_proto_rawDescOnce.Do(func() { - file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) - }) - return file_extensions_extension_proto_rawDescData -} - -var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_extensions_extension_proto_goTypes = []interface{}{ - (*Version)(nil), // 0: gnostic.extension.v1.Version - (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest - (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse - (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper - (*anypb.Any)(nil), // 4: google.protobuf.Any -} -var file_extensions_extension_proto_depIdxs = []int32{ - 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper - 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version - 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_extensions_extension_proto_init() } -func file_extensions_extension_proto_init() { - if File_extensions_extension_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Wrapper); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_extensions_extension_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_extensions_extension_proto_goTypes, - DependencyIndexes: file_extensions_extension_proto_depIdxs, - MessageInfos: file_extensions_extension_proto_msgTypes, - }.Build() - File_extensions_extension_proto = out.File - file_extensions_extension_proto_rawDesc = nil - file_extensions_extension_proto_goTypes = nil - file_extensions_extension_proto_depIdxs = nil -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.proto deleted file mode 100644 index 875137c1a86..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package gnostic.extension.v1; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "GnosticExtension"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.gnostic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -// "Gnostic Extension" -option objc_class_prefix = "GNX"; - -// The Go package name. -option go_package = "./extensions;gnostic_extension_v1"; - -// The version number of Gnostic. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The extension to process. - Wrapper wrapper = 1; - - // The version number of Gnostic. - Version compiler_version = 2; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string errors = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension. - string extension_name = 2; - - // YAML-formatted extension value. - string yaml = 3; -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index ec8afd00923..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gnostic_extension_v1 - -import ( - "io/ioutil" - "log" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -// Main implements the main program of an extension handler. -func Main(handler extensionHandler) { - // unpack the request - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - log.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - log.Println("Input error:", err.Error()) - os.Exit(1) - } - // call the handler - handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) - // respond with the output of the handler - response := &ExtensionHandlerResponse{ - Handled: false, // default assumption - Errors: make([]string, 0), - } - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } else if handled { - response.Handled = true - response.Value, err = ptypes.MarshalAny(output) - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } - } - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/README.md deleted file mode 100644 index 6793c5179c8..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# jsonschema - -This directory contains code for reading, writing, and manipulating JSON -schemas. diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/base.go deleted file mode 100644 index 0af8b148b9c..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/base.go +++ /dev/null @@ -1,84 +0,0 @@ - -// THIS FILE IS AUTOMATICALLY GENERATED. 
- -package jsonschema - -import ( - "encoding/base64" -) - -func baseSchemaBytes() ([]byte, error){ - return base64.StdEncoding.DecodeString( -`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi -JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl -c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK -ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg -ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp -bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp -dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl -ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK -ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v -bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg -ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki -LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p -bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s -CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog -ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg -ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog -ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg -ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog -ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 -IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog -ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 -ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl -ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw -ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg -ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg -ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg -ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg -IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 -aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg -ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg -ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg -ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK -ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi -ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP -ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy -ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg -ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv -ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi -OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl -SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
-dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k -ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk -cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl -cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh -ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg -ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk -ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk -ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 -IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi -b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 -LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl -cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv -bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog -ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq -ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg -ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg -ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg -ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg -ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu -aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh -bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl -cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs -CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs -ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg -ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg -ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy -YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 -IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg -fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 -IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/display.go deleted file mode 100644 index 028a760a91b..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/display.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - "strings" -) - -// -// DISPLAY -// The following methods display Schemas. -// - -// Description returns a string representation of a string or string array. -func (s *StringOrStringArray) Description() string { - if s.String != nil { - return *s.String - } - if s.StringArray != nil { - return strings.Join(*s.StringArray, ", ") - } - return "" -} - -// Returns a string representation of a Schema. -func (schema *Schema) String() string { - return schema.describeSchema("") -} - -// Helper: Returns a string representation of a Schema indented by a specified string. -func (schema *Schema) describeSchema(indent string) string { - result := "" - if schema.Schema != nil { - result += indent + "$schema: " + *(schema.Schema) + "\n" - } - if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" - } - if schema.MultipleOf != nil { - result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) - } - if schema.Maximum != nil { - result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) - } - if schema.ExclusiveMaximum != nil { - result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) - } - if schema.ExclusiveMinimum != nil { - result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) - } - if schema.MinLength != nil { - result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) - } - if schema.Pattern != nil { - result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) - } - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - result += indent + "additionalItems:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalItems.Boolean) - result += indent + fmt.Sprintf("additionalItems: %+v\n", b) - } - } - if schema.Items != nil { - result += indent + "items:\n" - items := schema.Items - if items.SchemaArray != nil { - for i, s := range *(items.SchemaArray) { - result += indent + " " + fmt.Sprintf("%d", i) + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } else if items.Schema != nil { - result += items.Schema.describeSchema(indent + " " + " ") - } - } - if schema.MaxItems != nil { - result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) - } - if schema.MinItems != nil { - result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) - } - if schema.UniqueItems != nil { - result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) - } - if schema.MaxProperties != nil { - result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) - } - if schema.MinProperties != nil { - result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) - } - if schema.Required != nil { - result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) - } - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - result += indent + "additionalProperties:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalProperties.Boolean) - result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) - } - } - if schema.Properties != 
nil { - result += indent + "properties:\n" - for _, pair := range *(schema.Properties) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.PatternProperties != nil { - result += indent + "patternProperties:\n" - for _, pair := range *(schema.PatternProperties) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Dependencies != nil { - result += indent + "dependencies:\n" - for _, pair := range *(schema.Dependencies) { - name := pair.Name - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } else { - a := schemaOrStringArray.StringArray - if a != nil { - result += indent + " " + name + ":\n" - for _, s2 := range *a { - result += indent + " " + " " + s2 + "\n" - } - } - } - - } - } - if schema.Enumeration != nil { - result += indent + "enumeration:\n" - for _, value := range *(schema.Enumeration) { - if value.String != nil { - result += indent + " " + fmt.Sprintf("%+v\n", *value.String) - } else { - result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) - } - } - } - if schema.Type != nil { - result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) - } - if schema.AllOf != nil { - result += indent + "allOf:\n" - for _, s := range *(schema.AllOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.AnyOf != nil { - result += indent + "anyOf:\n" - for _, s := range *(schema.AnyOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.OneOf != nil { - result += indent + "oneOf:\n" - for _, s := range *(schema.OneOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.Not != nil { - result += indent + "not:\n" - result += schema.Not.describeSchema(indent + " ") - } - if schema.Definitions != nil { - result += indent + "definitions:\n" - for _, pair := range *(schema.Definitions) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Title != nil { - result += indent + "title: " + *(schema.Title) + "\n" - } - if schema.Description != nil { - result += indent + "description: " + *(schema.Description) + "\n" - } - if schema.Default != nil { - result += indent + "default:\n" - result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) - } - if schema.Format != nil { - result += indent + "format: " + *(schema.Format) + "\n" - } - if schema.Ref != nil { - result += indent + "$ref: " + *(schema.Ref) + "\n" - } - return result -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/models.go deleted file mode 100644 index 4781bdc5f50..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/models.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jsonschema supports the reading, writing, and manipulation -// of JSON Schemas. -package jsonschema - -import "gopkg.in/yaml.v3" - -// The Schema struct models a JSON Schema and, because schemas are -// defined hierarchically, contains many references to itself. -// All fields are pointers and are nil if the associated values -// are not specified. -type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers - - // http://json-schema.org/latest/json-schema-validation.html - // 5.1. Validation keywords for numeric instances (number and integer) - MultipleOf *SchemaNumber - Maximum *SchemaNumber - ExclusiveMaximum *bool - Minimum *SchemaNumber - ExclusiveMinimum *bool - - // 5.2. Validation keywords for strings - MaxLength *int64 - MinLength *int64 - Pattern *string - - // 5.3. Validation keywords for arrays - AdditionalItems *SchemaOrBoolean - Items *SchemaOrSchemaArray - MaxItems *int64 - MinItems *int64 - UniqueItems *bool - - // 5.4. Validation keywords for objects - MaxProperties *int64 - MinProperties *int64 - Required *[]string - AdditionalProperties *SchemaOrBoolean - Properties *[]*NamedSchema - PatternProperties *[]*NamedSchema - Dependencies *[]*NamedSchemaOrStringArray - - // 5.5. Validation keywords for any instance type - Enumeration *[]SchemaEnumValue - Type *StringOrStringArray - AllOf *[]*Schema - AnyOf *[]*Schema - OneOf *[]*Schema - Not *Schema - Definitions *[]*NamedSchema - - // 6. Metadata keywords - Title *string - Description *string - Default *yaml.Node - - // 7. Semantic validation with "format" - Format *string -} - -// These helper structs represent "combination" types that generally can -// have values of one type or another. All are used to represent parts -// of Schemas. - -// SchemaNumber represents a value that can be either an Integer or a Float. -type SchemaNumber struct { - Integer *int64 - Float *float64 -} - -// NewSchemaNumberWithInteger creates and returns a new object -func NewSchemaNumberWithInteger(i int64) *SchemaNumber { - result := &SchemaNumber{} - result.Integer = &i - return result -} - -// NewSchemaNumberWithFloat creates and returns a new object -func NewSchemaNumberWithFloat(f float64) *SchemaNumber { - result := &SchemaNumber{} - result.Float = &f - return result -} - -// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. -type SchemaOrBoolean struct { - Schema *Schema - Boolean *bool -} - -// NewSchemaOrBooleanWithSchema creates and returns a new object -func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Schema = s - return result -} - -// NewSchemaOrBooleanWithBoolean creates and returns a new object -func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Boolean = &b - return result -} - -// StringOrStringArray represents a value that can be either -// a String or an Array of Strings. 
-type StringOrStringArray struct { - String *string - StringArray *[]string -} - -// NewStringOrStringArrayWithString creates and returns a new object -func NewStringOrStringArrayWithString(s string) *StringOrStringArray { - result := &StringOrStringArray{} - result.String = &s - return result -} - -// NewStringOrStringArrayWithStringArray creates and returns a new object -func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { - result := &StringOrStringArray{} - result.StringArray = &a - return result -} - -// SchemaOrStringArray represents a value that can be either -// a Schema or an Array of Strings. -type SchemaOrStringArray struct { - Schema *Schema - StringArray *[]string -} - -// SchemaOrSchemaArray represents a value that can be either -// a Schema or an Array of Schemas. -type SchemaOrSchemaArray struct { - Schema *Schema - SchemaArray *[]*Schema -} - -// NewSchemaOrSchemaArrayWithSchema creates and returns a new object -func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.Schema = s - return result -} - -// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object -func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.SchemaArray = &a - return result -} - -// SchemaEnumValue represents a value that can be part of an -// enumeration in a Schema. -type SchemaEnumValue struct { - String *string - Bool *bool -} - -// NamedSchema is a name-value pair that is used to emulate maps -// with ordered keys. -type NamedSchema struct { - Name string - Value *Schema -} - -// NewNamedSchema creates and returns a new object -func NewNamedSchema(name string, value *Schema) *NamedSchema { - return &NamedSchema{Name: name, Value: value} -} - -// NamedSchemaOrStringArray is a name-value pair that is used -// to emulate maps with ordered keys. -type NamedSchemaOrStringArray struct { - Name string - Value *SchemaOrStringArray -} - -// Access named subschemas by name - -func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { - if array == nil { - return nil - } - for _, pair := range *array { - if pair.Name == name { - return pair.Value - } - } - return nil -} - -// PropertyWithName returns the selected element. -func (s *Schema) PropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Properties, name) -} - -// PatternPropertyWithName returns the selected element. -func (s *Schema) PatternPropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.PatternProperties, name) -} - -// DefinitionWithName returns the selected element. -func (s *Schema) DefinitionWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Definitions, name) -} - -// AddProperty adds a named property. -func (s *Schema) AddProperty(name string, property *Schema) { - *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/operations.go deleted file mode 100644 index ba8dd4a91b9..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/operations.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - "log" - "strings" -) - -// -// OPERATIONS -// The following methods perform operations on Schemas. -// - -// IsEmpty returns true if no members of the Schema are specified. -func (schema *Schema) IsEmpty() bool { - return (schema.Schema == nil) && - (schema.ID == nil) && - (schema.MultipleOf == nil) && - (schema.Maximum == nil) && - (schema.ExclusiveMaximum == nil) && - (schema.Minimum == nil) && - (schema.ExclusiveMinimum == nil) && - (schema.MaxLength == nil) && - (schema.MinLength == nil) && - (schema.Pattern == nil) && - (schema.AdditionalItems == nil) && - (schema.Items == nil) && - (schema.MaxItems == nil) && - (schema.MinItems == nil) && - (schema.UniqueItems == nil) && - (schema.MaxProperties == nil) && - (schema.MinProperties == nil) && - (schema.Required == nil) && - (schema.AdditionalProperties == nil) && - (schema.Properties == nil) && - (schema.PatternProperties == nil) && - (schema.Dependencies == nil) && - (schema.Enumeration == nil) && - (schema.Type == nil) && - (schema.AllOf == nil) && - (schema.AnyOf == nil) && - (schema.OneOf == nil) && - (schema.Not == nil) && - (schema.Definitions == nil) && - (schema.Title == nil) && - (schema.Description == nil) && - (schema.Default == nil) && - (schema.Format == nil) && - (schema.Ref == nil) -} - -// IsEqual returns true if two schemas are equal. -func (schema *Schema) IsEqual(schema2 *Schema) bool { - return schema.String() == schema2.String() -} - -// SchemaOperation represents a function that can be applied to a Schema. -type SchemaOperation func(schema *Schema, context string) - -// Applies a specified function to a Schema and all of the Schemas that it contains. 
-func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { - - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalItems") - } - } - - if schema.Items != nil { - if schema.Items.SchemaArray != nil { - for _, s := range *(schema.Items.SchemaArray) { - s.applyToSchemas(operation, "Items.SchemaArray") - } - } else if schema.Items.Schema != nil { - schema.Items.Schema.applyToSchemas(operation, "Items.Schema") - } - } - - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalProperties") - } - } - - if schema.Properties != nil { - for _, pair := range *(schema.Properties) { - s := pair.Value - s.applyToSchemas(operation, "Properties") - } - } - if schema.PatternProperties != nil { - for _, pair := range *(schema.PatternProperties) { - s := pair.Value - s.applyToSchemas(operation, "PatternProperties") - } - } - - if schema.Dependencies != nil { - for _, pair := range *(schema.Dependencies) { - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - s.applyToSchemas(operation, "Dependencies") - } - } - } - - if schema.AllOf != nil { - for _, s := range *(schema.AllOf) { - s.applyToSchemas(operation, "AllOf") - } - } - if schema.AnyOf != nil { - for _, s := range *(schema.AnyOf) { - s.applyToSchemas(operation, "AnyOf") - } - } - if schema.OneOf != nil { - for _, s := range *(schema.OneOf) { - s.applyToSchemas(operation, "OneOf") - } - } - if schema.Not != nil { - schema.Not.applyToSchemas(operation, "Not") - } - - if schema.Definitions != nil { - for _, pair := range *(schema.Definitions) { - s := pair.Value - s.applyToSchemas(operation, "Definitions") - } - } - - operation(schema, context) -} - -// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
-func (schema *Schema) CopyProperties(source *Schema) { - if source.Schema != nil { - schema.Schema = source.Schema - } - if source.ID != nil { - schema.ID = source.ID - } - if source.MultipleOf != nil { - schema.MultipleOf = source.MultipleOf - } - if source.Maximum != nil { - schema.Maximum = source.Maximum - } - if source.ExclusiveMaximum != nil { - schema.ExclusiveMaximum = source.ExclusiveMaximum - } - if source.Minimum != nil { - schema.Minimum = source.Minimum - } - if source.ExclusiveMinimum != nil { - schema.ExclusiveMinimum = source.ExclusiveMinimum - } - if source.MaxLength != nil { - schema.MaxLength = source.MaxLength - } - if source.MinLength != nil { - schema.MinLength = source.MinLength - } - if source.Pattern != nil { - schema.Pattern = source.Pattern - } - if source.AdditionalItems != nil { - schema.AdditionalItems = source.AdditionalItems - } - if source.Items != nil { - schema.Items = source.Items - } - if source.MaxItems != nil { - schema.MaxItems = source.MaxItems - } - if source.MinItems != nil { - schema.MinItems = source.MinItems - } - if source.UniqueItems != nil { - schema.UniqueItems = source.UniqueItems - } - if source.MaxProperties != nil { - schema.MaxProperties = source.MaxProperties - } - if source.MinProperties != nil { - schema.MinProperties = source.MinProperties - } - if source.Required != nil { - schema.Required = source.Required - } - if source.AdditionalProperties != nil { - schema.AdditionalProperties = source.AdditionalProperties - } - if source.Properties != nil { - schema.Properties = source.Properties - } - if source.PatternProperties != nil { - schema.PatternProperties = source.PatternProperties - } - if source.Dependencies != nil { - schema.Dependencies = source.Dependencies - } - if source.Enumeration != nil { - schema.Enumeration = source.Enumeration - } - if source.Type != nil { - schema.Type = source.Type - } - if source.AllOf != nil { - schema.AllOf = source.AllOf - } - if source.AnyOf != nil { - schema.AnyOf = source.AnyOf - } - if source.OneOf != nil { - schema.OneOf = source.OneOf - } - if source.Not != nil { - schema.Not = source.Not - } - if source.Definitions != nil { - schema.Definitions = source.Definitions - } - if source.Title != nil { - schema.Title = source.Title - } - if source.Description != nil { - schema.Description = source.Description - } - if source.Default != nil { - schema.Default = source.Default - } - if source.Format != nil { - schema.Format = source.Format - } - if source.Ref != nil { - schema.Ref = source.Ref - } -} - -// TypeIs returns true if the Type of a Schema includes the specified type -func (schema *Schema) TypeIs(typeName string) bool { - if schema.Type != nil { - // the schema Type is either a string or an array of strings - if schema.Type.String != nil { - return (*(schema.Type.String) == typeName) - } else if schema.Type.StringArray != nil { - for _, n := range *(schema.Type.StringArray) { - if n == typeName { - return true - } - } - } - } - return false -} - -// ResolveRefs resolves "$ref" elements in a Schema and its children. -// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, -// the reference is kept and we expect downstream tools to separately model these -// referenced schemas. 
-func (schema *Schema) ResolveRefs() { - rootSchema := schema - count := 1 - for count > 0 { - count = 0 - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.Ref != nil { - resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) - if err != nil { - log.Printf("%+v", err) - } else if resolvedRef.TypeIs("object") { - // don't substitute for objects, we'll model the referenced schema with a class - } else if context == "OneOf" { - // don't substitute for references inside oneOf declarations - } else if resolvedRef.OneOf != nil { - // don't substitute for references that contain oneOf declarations - } else if resolvedRef.AdditionalProperties != nil { - // don't substitute for references that look like objects - } else { - schema.Ref = nil - schema.CopyProperties(resolvedRef) - count++ - } - } - }, "") - } -} - -// resolveJSONPointer resolves JSON pointers. -// This current implementation is very crude and custom for OpenAPI 2.0 schemas. -// It panics for any pointer that it is unable to resolve. -func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { - parts := strings.Split(ref, "#") - if len(parts) == 2 { - documentName := parts[0] + "#" - if documentName == "#" && schema.ID != nil { - documentName = *(schema.ID) - } - path := parts[1] - document := schemas[documentName] - pathParts := strings.Split(path, "/") - - // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases - if len(pathParts) == 1 { - return document, nil - } else if len(pathParts) == 3 { - switch pathParts[1] { - case "definitions": - dictionary := document.Definitions - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - case "properties": - dictionary := document.Properties - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - default: - break - } - } - } - if result == nil { - return nil, fmt.Errorf("unresolved pointer: %+v", ref) - } - return result, nil -} - -// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. -func (schema *Schema) ResolveAllOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AllOf != nil { - for _, allOf := range *(schema.AllOf) { - schema.CopyProperties(allOf) - } - schema.AllOf = nil - } - }, "resolveAllOfs") -} - -// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
-func (schema *Schema) ResolveAnyOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AnyOf != nil { - schema.OneOf = schema.AnyOf - schema.AnyOf = nil - } - }, "resolveAnyOfs") -} - -// return a pointer to a copy of a passed-in string -func stringptr(input string) (output *string) { - return &input -} - -// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperty(name string) { - *schema.Properties = append(*schema.Properties, - NewNamedSchema(name, - &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) -} - -// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperties(names []string) { - for _, name := range names { - schema.CopyOfficialSchemaProperty(name) - } -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/reader.go deleted file mode 100644 index b8583d46602..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/reader.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate go run generate-base.go - -package jsonschema - -import ( - "fmt" - "io/ioutil" - "strconv" - - "gopkg.in/yaml.v3" -) - -// This is a global map of all known Schemas. -// It is initialized when the first Schema is created and inserted. -var schemas map[string]*Schema - -// NewBaseSchema builds a schema object from an embedded json representation. -func NewBaseSchema() (schema *Schema, err error) { - b, err := baseSchemaBytes() - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(b, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromFile reads a schema from a file. -// Currently this assumes that schemas are stored in the source distribution of this project. -func NewSchemaFromFile(filename string) (schema *Schema, err error) { - file, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(file, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromObject constructs a schema from a parsed JSON object. -// Due to the complexity of the schema representation, this is a -// custom reader and not the standard Go JSON reader (encoding/json). 
-func NewSchemaFromObject(jsonData *yaml.Node) *Schema { - switch jsonData.Kind { - case yaml.DocumentNode: - return NewSchemaFromObject(jsonData.Content[0]) - case yaml.MappingNode: - schema := &Schema{} - - for i := 0; i < len(jsonData.Content); i += 2 { - k := jsonData.Content[i].Value - v := jsonData.Content[i+1] - - switch k { - case "$schema": - schema.Schema = schema.stringValue(v) - case "id": - schema.ID = schema.stringValue(v) - - case "multipleOf": - schema.MultipleOf = schema.numberValue(v) - case "maximum": - schema.Maximum = schema.numberValue(v) - case "exclusiveMaximum": - schema.ExclusiveMaximum = schema.boolValue(v) - case "minimum": - schema.Minimum = schema.numberValue(v) - case "exclusiveMinimum": - schema.ExclusiveMinimum = schema.boolValue(v) - - case "maxLength": - schema.MaxLength = schema.intValue(v) - case "minLength": - schema.MinLength = schema.intValue(v) - case "pattern": - schema.Pattern = schema.stringValue(v) - - case "additionalItems": - schema.AdditionalItems = schema.schemaOrBooleanValue(v) - case "items": - schema.Items = schema.schemaOrSchemaArrayValue(v) - case "maxItems": - schema.MaxItems = schema.intValue(v) - case "minItems": - schema.MinItems = schema.intValue(v) - case "uniqueItems": - schema.UniqueItems = schema.boolValue(v) - - case "maxProperties": - schema.MaxProperties = schema.intValue(v) - case "minProperties": - schema.MinProperties = schema.intValue(v) - case "required": - schema.Required = schema.arrayOfStringsValue(v) - case "additionalProperties": - schema.AdditionalProperties = schema.schemaOrBooleanValue(v) - case "properties": - schema.Properties = schema.mapOfSchemasValue(v) - case "patternProperties": - schema.PatternProperties = schema.mapOfSchemasValue(v) - case "dependencies": - schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) - - case "enum": - schema.Enumeration = schema.arrayOfEnumValuesValue(v) - - case "type": - schema.Type = schema.stringOrStringArrayValue(v) - case "allOf": - schema.AllOf = schema.arrayOfSchemasValue(v) - case "anyOf": - schema.AnyOf = schema.arrayOfSchemasValue(v) - case "oneOf": - schema.OneOf = schema.arrayOfSchemasValue(v) - case "not": - schema.Not = NewSchemaFromObject(v) - case "definitions": - schema.Definitions = schema.mapOfSchemasValue(v) - - case "title": - schema.Title = schema.stringValue(v) - case "description": - schema.Description = schema.stringValue(v) - - case "default": - schema.Default = v - - case "format": - schema.Format = schema.stringValue(v) - case "$ref": - schema.Ref = schema.stringValue(v) - default: - fmt.Printf("UNSUPPORTED (%s)\n", k) - } - } - - // insert schema in global map - if schema.ID != nil { - if schemas == nil { - schemas = make(map[string]*Schema, 0) - } - schemas[*(schema.ID)] = schema - } - return schema - - default: - fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil - } - - return nil -} - -// -// BUILDERS -// The following methods build elements of Schemas from interface{} values. -// Each returns nil if it is unable to build the desired element. -// - -// Gets the string value of an interface{} value if possible. -func (schema *Schema) stringValue(v *yaml.Node) *string { - switch v.Kind { - case yaml.ScalarNode: - return &v.Value - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the numeric value of an interface{} value if possible. 
-func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { - number := &SchemaNumber{} - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - number.Float = &v2 - return number - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - number.Integer = &v2 - return number - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the integer value of an interface{} value if possible. -func (schema *Schema) intValue(v *yaml.Node) *int64 { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - v3 := int64(v2) - return &v3 - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - return &v2 - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the bool value of an interface{} value if possible. -func (schema *Schema) boolValue(v *yaml.Node) *bool { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!bool": - v2, _ := strconv.ParseBool(v.Value) - return &v2 - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a map of Schemas from an interface{} value if possible. -func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { - switch v.Kind { - case yaml.MappingNode: - m := make([]*NamedSchema, 0) - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} - m = append(m, pair) - } - return &m - default: - fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of Schemas from an interface{} value if possible. -func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) - } - } - return &m - case yaml.MappingNode: - m := make([]*Schema, 0) - s := NewSchemaFromObject(v) - m = append(m, s) - return &m - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a Schema or an array of Schemas from an interface{} value if possible. -func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) - } - } - return &SchemaOrSchemaArray{SchemaArray: &m} - case yaml.MappingNode: - s := NewSchemaFromObject(v) - return &SchemaOrSchemaArray{Schema: s} - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of strings from an interface{} value if possible. 
-func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { - switch v.Kind { - case yaml.ScalarNode: - a := []string{v.Value} - return &a - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - return &a - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a string or an array of strings from an interface{} value if possible. -func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { - switch v.Kind { - case yaml.ScalarNode: - s := &StringOrStringArray{} - s.String = &v.Value - return s - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - s := &StringOrStringArray{} - s.StringArray = &a - return s - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of enum values from an interface{} value if possible. -func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { - a := make([]SchemaEnumValue, 0) - switch v.Kind { - case yaml.SequenceNode: - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - switch v2.Tag { - case "!!str": - a = append(a, SchemaEnumValue{String: &v2.Value}) - case "!!bool": - v3, _ := strconv.ParseBool(v2.Value) - a = append(a, SchemaEnumValue{Bool: &v3}) - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) - } - return &a -} - -// Gets a map of schemas or string arrays from an interface{} value if possible. -func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { - m := make([]*NamedSchemaOrStringArray, 0) - switch v.Kind { - case yaml.MappingNode: - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - switch v2.Kind { - case yaml.SequenceNode: - a := make([]string, 0) - for _, v3 := range v2.Content { - switch v3.Kind { - case yaml.ScalarNode: - a = append(a, v3.Value) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) - } - } - s := &SchemaOrStringArray{} - s.StringArray = &a - pair := &NamedSchemaOrStringArray{Name: k2, Value: s} - m = append(m, pair) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) - } - return &m -} - -// Gets a schema or a boolean value from an interface{} value if possible. 
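
The wrapper types used above and in schemaOrBooleanValue below (SchemaOrBoolean, SchemaOrSchemaArray, StringOrStringArray) follow a one-field-set union convention: exactly one pointer is non-nil, mirroring JSON Schema's "X or Y" shapes. A self-contained sketch of the pattern with a hypothetical BoolOrString type:

package main

import "fmt"

// BoolOrString is a hypothetical stand-in for the vendored union types:
// producers set exactly one field and consumers branch on which is non-nil.
type BoolOrString struct {
	Bool   *bool
	String *string
}

func describe(u BoolOrString) string {
	switch {
	case u.Bool != nil:
		return fmt.Sprintf("boolean %t", *u.Bool)
	case u.String != nil:
		return fmt.Sprintf("string %q", *u.String)
	default:
		return "empty union"
	}
}

func main() {
	b := true
	fmt.Println(describe(BoolOrString{Bool: &b})) // boolean true
}
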
-func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { - schemaOrBoolean := &SchemaOrBoolean{} - switch v.Kind { - case yaml.ScalarNode: - v2, _ := strconv.ParseBool(v.Value) - schemaOrBoolean.Boolean = &v2 - case yaml.MappingNode: - schemaOrBoolean.Schema = NewSchemaFromObject(v) - default: - fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) - } - return schemaOrBoolean -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/schema.json deleted file mode 100644 index 85eb502a680..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/schema.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uri" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": 
true - } - ] - }, - "allOf": { "$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/writer.go deleted file mode 100644 index 340dc5f9330..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/jsonschema/writer.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - - "gopkg.in/yaml.v3" -) - -const indentation = " " - -func renderMappingNode(node *yaml.Node, indent string) (result string) { - result = "{\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i += 2 { - // first print the key - key := node.Content[i].Value - result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) - // then the value - value := node.Content[i+1] - switch value.Kind { - case yaml.ScalarNode: - result += "\"" + value.Value + "\"" - case yaml.MappingNode: - result += renderMappingNode(value, innerIndent) - case yaml.SequenceNode: - result += renderSequenceNode(value, innerIndent) - default: - result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) - } - if i < len(node.Content)-2 { - result += "," - } - result += "\n" - } - - result += indent + "}" - return result -} - -func renderSequenceNode(node *yaml.Node, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i++ { - item := node.Content[i] - switch item.Kind { - case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" - case yaml.MappingNode: - result += innerIndent + renderMappingNode(item, innerIndent) + "" - default: - result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) - } - if i < len(node.Content)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -func renderStringArray(array []string, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i, item := range array { - result += innerIndent + "\"" + item + "\"" - if i < len(array)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -// Render renders a yaml.Node as JSON -func Render(node *yaml.Node) string { - if node.Kind == yaml.DocumentNode { - if len(node.Content) == 1 { - return Render(node.Content[0]) - } - } else if node.Kind == yaml.MappingNode { - return renderMappingNode(node, "") + "\n" - } else if node.Kind == yaml.SequenceNode { - return renderSequenceNode(node, "") + "\n" - } - return "" -} - -func (object *SchemaNumber) nodeValue() *yaml.Node { - if object.Integer != nil { - return nodeForInt64(*object.Integer) - } else if object.Float != 
nil { - return nodeForFloat64(*object.Float) - } else { - return nil - } -} - -func (object *SchemaOrBoolean) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.Boolean != nil { - return nodeForBoolean(*object.Boolean) - } else { - return nil - } -} - -func nodeForStringArray(array []string) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, nodeForString(item)) - } - return nodeForSequence(content) -} - -func nodeForSchemaArray(array []*Schema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func (object *StringOrStringArray) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrStringArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.SchemaArray != nil { - return nodeForSchemaArray(*(object.SchemaArray)) - } else { - return nil - } -} - -func (object *SchemaEnumValue) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.Bool != nil { - return nodeForBoolean(*object.Bool) - } else { - return nil - } -} - -func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range *array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func nodeForMapping(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: content, - } -} - -func nodeForSequence(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.SequenceNode, - Content: content, - } -} - -func nodeForString(value string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: value, - } -} - -func nodeForBoolean(value bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", value), - } -} - -func nodeForInt64(value int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", value), - } -} - -func nodeForFloat64(value float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%f", value), - } -} - -func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { - nodes = append(nodes, nodeForString(name)) - nodes = append(nodes, value) - return nodes -} - -func (schema *Schema) nodeValue() *yaml.Node { - n := &yaml.Node{Kind: 
yaml.MappingNode} - content := make([]*yaml.Node, 0) - if schema.Title != nil { - content = appendPair(content, "title", nodeForString(*schema.Title)) - } - if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) - } - if schema.Schema != nil { - content = appendPair(content, "$schema", nodeForString(*schema.Schema)) - } - if schema.Type != nil { - content = appendPair(content, "type", schema.Type.nodeValue()) - } - if schema.Items != nil { - content = appendPair(content, "items", schema.Items.nodeValue()) - } - if schema.Description != nil { - content = appendPair(content, "description", nodeForString(*schema.Description)) - } - if schema.Required != nil { - content = appendPair(content, "required", nodeForStringArray(*schema.Required)) - } - if schema.AdditionalProperties != nil { - content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) - } - if schema.PatternProperties != nil { - content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) - } - if schema.Properties != nil { - content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) - } - if schema.Dependencies != nil { - content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) - } - if schema.Ref != nil { - content = appendPair(content, "$ref", nodeForString(*schema.Ref)) - } - if schema.MultipleOf != nil { - content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) - } - if schema.Maximum != nil { - content = appendPair(content, "maximum", schema.Maximum.nodeValue()) - } - if schema.ExclusiveMaximum != nil { - content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - content = appendPair(content, "minimum", schema.Minimum.nodeValue()) - } - if schema.ExclusiveMinimum != nil { - content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) - } - if schema.MinLength != nil { - content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) - } - if schema.Pattern != nil { - content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) - } - if schema.AdditionalItems != nil { - content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) - } - if schema.MaxItems != nil { - content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) - } - if schema.MinItems != nil { - content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) - } - if schema.UniqueItems != nil { - content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) - } - if schema.MaxProperties != nil { - content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) - } - if schema.MinProperties != nil { - content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) - } - if schema.Enumeration != nil { - content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) - } - if schema.AllOf != nil { - content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) - } - if schema.AnyOf != nil { - content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) - } - if schema.OneOf != nil { - content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) - } - if schema.Not != nil { - 
content = appendPair(content, "not", schema.Not.nodeValue()) - } - if schema.Definitions != nil { - content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) - } - if schema.Default != nil { - // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) - } - if schema.Format != nil { - content = appendPair(content, "format", nodeForString(*schema.Format)) - } - n.Content = content - return n -} - -// JSONString returns a json representation of a schema. -func (schema *Schema) JSONString() string { - node := schema.nodeValue() - return Render(node) -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go deleted file mode 100644 index 727d7f4ad52..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go +++ /dev/null @@ -1,8818 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v3" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := compiler.BoolForScalarNode(in) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - matched = true - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid AdditionalPropertiesItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes := compiler.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. 
-func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, 
err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. -func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
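
Every constructor above and below also closes with the same sweep for specification extensions: mapping keys prefixed with x- are collected as opaque named values rather than rejected. A hypothetical standalone version of that loop:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

// vendorExtensions gathers all x-* entries of a mapping node, the same
// filter the VendorExtension loops apply in the generated constructors.
func vendorExtensions(m *yaml.Node) map[string]*yaml.Node {
	out := map[string]*yaml.Node{}
	for i := 0; i+1 < len(m.Content); i += 2 {
		if k := m.Content[i].Value; strings.HasPrefix(k, "x-") {
			out[k] = m.Content[i+1]
		}
	}
	return out
}

func main() {
	var root yaml.Node
	if err := yaml.Unmarshal([]byte("name: body\nx-internal: true\n"), &root); err != nil {
		panic(err)
	}
	for k, v := range vendorExtensions(root.Content[0]) {
		fmt.Println(k, "=", v.Value)
	}
}
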
-func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = 
append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
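
For orientation, a hedged usage sketch of the document-level entry point: NewDocument below wires NewInfo, NewPaths, and the other constructors together, and in this generation of gnostic a ParseDocument helper in the same openapiv2 package is the usual way in. The exact entry point may differ across gnostic versions.

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/openapiv2"
)

func main() {
	spec := []byte("swagger: \"2.0\"\n" +
		"info: {title: demo, version: \"1.0\"}\n" +
		"paths: {}\n")
	doc, err := openapi_v2.ParseDocument(spec) // assumed entry point
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(doc.Swagger, doc.Info.Title) // 2.0 demo
}
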
-func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Schemes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := compiler.SequenceNodeForNode(v6) - if ok { - x.Consumes = compiler.StringArrayForSequenceNode(v) - } else { - message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := compiler.SequenceNodeForNode(v7) - if ok { - x.Produces = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", v11, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", v13, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := compiler.SequenceNodeForNode(v14) - if ok { - for _, item := range a.Content { - y, err := NewTag(item, compiler.NewContext("tags", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", v15, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - 
result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := 
m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Required = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has 
unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
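
The recurring enum guard, seen heavily in NewFormDataParameterSubSchema below: decode a scalar, then verify it is one of the values the Swagger schema permits. It reduces to a membership test; inEnum is a hypothetical stand-in for compiler.StringArrayContainsValue.

package main

import "fmt"

// inEnum reports whether v is one of the allowed enum values.
func inEnum(v string, allowed ...string) bool {
	for _, a := range allowed {
		if a == v {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(inEnum("formData", "query", "header", "formData")) // true
	fmt.Println(inEnum("cookie", "query", "header", "formData"))   // false
}
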
[Elided hunk: the deletion lines here were extraction-garbled — the newlines between diff lines were collapsed into single run-on lines — and the deleted material is vendored, machine-generated Go code, apparently the gnostic OpenAPI v2 parser built on its internal `compiler` helper package. The hunk removes the constructor functions NewFormDataParameterSubSchema, NewHeader, NewHeaderParameterSubSchema, NewHeaders, NewInfo, NewItemsItem, NewJsonReference, NewLicense, NewNamedAny, NewNamedHeader, NewNamedParameter, NewNamedPathItem, NewNamedResponse, NewNamedResponseValue, NewNamedSchema, NewNamedSecurityDefinitionsItem, NewNamedString, NewNamedStringArray, NewNonBodyParameter, NewOauth2AccessCodeSecurity, NewOauth2ApplicationSecurity, NewOauth2ImplicitSecurity, and NewOauth2PasswordSecurity; the section is cut off partway through the last one. Every function follows the same generated pattern: unpack a YAML mapping node, report missing required keys and unknown keys, decode each field with type and enum validation, gather `x-` vendor extensions into NamedAny pairs, and return the object together with an aggregated error group.]
handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. -func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { - errors := make([]error, 0) - x := &Oauth2Scopes{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedString additional_properties = 1; - // MAP: string - x.AdditionalProperties = make([]*NamedString, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedString{} - pair.Name = k - pair.Value, _ = compiler.StringForScalarNode(v) - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewOperation creates an object of type Operation if possible, returning an error if not. -func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { - errors := make([]error, 0) - x := &Operation{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"responses"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated string tags = 1; - v1 := compiler.MapValueForKey(m, "tags") - if v1 != nil { - v, ok := compiler.SequenceNodeForNode(v1) - if ok { - x.Tags = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string summary = 2; - v2 := compiler.MapValueForKey(m, "summary") - if v2 != nil { - x.Summary, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, 
compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 4; - v4 := compiler.MapValueForKey(m, "externalDocs") - if v4 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // string operation_id = 5; - v5 := compiler.MapValueForKey(m, "operationId") - if v5 != nil { - x.OperationId, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 6; - v6 := compiler.MapValueForKey(m, "produces") - if v6 != nil { - v, ok := compiler.SequenceNodeForNode(v6) - if ok { - x.Produces = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 7; - v7 := compiler.MapValueForKey(m, "consumes") - if v7 != nil { - v, ok := compiler.SequenceNodeForNode(v7) - if ok { - x.Consumes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated ParametersItem parameters = 8; - v8 := compiler.MapValueForKey(m, "parameters") - if v8 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := compiler.SequenceNodeForNode(v8) - if ok { - for _, item := range a.Content { - y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // Responses responses = 9; - v9 := compiler.MapValueForKey(m, "responses") - if v9 != nil { - var err error - x.Responses, err = NewResponses(v9, compiler.NewContext("responses", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string schemes = 10; - v10 := compiler.MapValueForKey(m, "schemes") - if v10 != nil { - v, ok := compiler.SequenceNodeForNode(v10) - if ok { - x.Schemes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool deprecated = 11; - v11 := compiler.MapValueForKey(m, "deprecated") - if v11 != nil { - x.Deprecated, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", item, context)) - if err != nil { - errors = append(errors, err) - } 
- x.Security = append(x.Security, y) - } - } - } - // repeated NamedAny vendor_extension = 13; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameter creates an object of type Parameter if possible, returning an error if not. -func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { - errors := make([]error, 0) - x := &Parameter{} - matched := false - // BodyParameter body_parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", m, context)) - if matchingError == nil { - x.Oneof = &Parameter_BodyParameter{BodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // NonBodyParameter non_body_parameter = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", m, context)) - if matchingError == nil { - x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid Parameter") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. -func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { - errors := make([]error, 0) - x := &ParameterDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedParameter additional_properties = 1; - // MAP: Parameter - x.AdditionalProperties = make([]*NamedParameter, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedParameter{} - pair.Name = k - var err error - pair.Value, err = NewParameter(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
-func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) { - errors := make([]error, 0) - x := &ParametersItem{} - matched := false - // Parameter parameter = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewParameter(m, compiler.NewContext("parameter", m, context)) - if matchingError == nil { - x.Oneof = &ParametersItem_Parameter{Parameter: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) - if matchingError == nil { - x.Oneof = &ParametersItem_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ParametersItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathItem creates an object of type PathItem if possible, returning an error if not. -func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { - errors := make([]error, 0) - x := &PathItem{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Operation get = 2; - v2 := compiler.MapValueForKey(m, "get") - if v2 != nil { - var err error - x.Get, err = NewOperation(v2, compiler.NewContext("get", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation put = 3; - v3 := compiler.MapValueForKey(m, "put") - if v3 != nil { - var err error - x.Put, err = NewOperation(v3, compiler.NewContext("put", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation post = 4; - v4 := compiler.MapValueForKey(m, "post") - if v4 != nil { - var err error - x.Post, err = NewOperation(v4, compiler.NewContext("post", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation delete = 5; - v5 := compiler.MapValueForKey(m, "delete") - if v5 != nil { - var err error - x.Delete, err = NewOperation(v5, compiler.NewContext("delete", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation options = 6; - v6 := compiler.MapValueForKey(m, "options") - if v6 != nil { - var err error - x.Options, err = NewOperation(v6, 
compiler.NewContext("options", v6, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation head = 7; - v7 := compiler.MapValueForKey(m, "head") - if v7 != nil { - var err error - x.Head, err = NewOperation(v7, compiler.NewContext("head", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // Operation patch = 8; - v8 := compiler.MapValueForKey(m, "patch") - if v8 != nil { - var err error - x.Patch, err = NewOperation(v8, compiler.NewContext("patch", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated ParametersItem parameters = 9; - v9 := compiler.MapValueForKey(m, "parameters") - if v9 != nil { - // repeated ParametersItem - x.Parameters = make([]*ParametersItem, 0) - a, ok := compiler.SequenceNodeForNode(v9) - if ok { - for _, item := range a.Content { - y, err := NewParametersItem(item, compiler.NewContext("parameters", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Parameters = append(x.Parameters, y) - } - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. 
-func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { - errors := make([]error, 0) - x := &PathParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"required"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [path] - if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 5; - v5 := compiler.MapValueForKey(m, "type") - if v5 != nil { - x.Type, ok = compiler.StringForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 6; - v6 := compiler.MapValueForKey(m, "format") - if v6 != nil { - x.Format, ok = 
compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", v7, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", v9, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - v, ok := compiler.FloatForScalarNode(v10) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - v, ok := compiler.FloatForScalarNode(v12) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v16) - if !ok { - 
message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v19) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - v, ok := compiler.FloatForScalarNode(v21) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. 
-func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - v, ok := compiler.FloatForScalarNode(v8) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := compiler.IntForScalarNode(v10) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := compiler.IntForScalarNode(v13) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v15) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v16) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - v, ok := compiler.FloatForScalarNode(v17) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // 
repeated NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil 
{ - x.Format, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", v8, context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", v10, context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = 
compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v21) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - v, ok := compiler.FloatForScalarNode(v22) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", v2, context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", v4, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { - errors := make([]error, 0) - x := &ResponseDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponse additional_properties = 1; - // MAP: Response - x.AdditionalProperties = make([]*NamedResponse, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedResponse{} - pair.Name = k - var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { - errors := make([]error, 0) - x := &ResponseValue{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", m, context)) - if matchingError == nil { - x.Oneof = &ResponseValue_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", m, context)) - if matchingError == nil { - x.Oneof = &ResponseValue_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid ResponseValue") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", v5, context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - v, ok := compiler.FloatForScalarNode(v7) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - 
v9 := compiler.MapValueForKey(m, "minimum") - if v9 != nil { - v, ok := compiler.FloatForScalarNode(v9) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := compiler.IntForScalarNode(v12) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v16) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := compiler.SequenceNodeForNode(v19) - if ok { - x.Required = 
compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", item, context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", v21, context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", v22, context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", v23, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := compiler.SequenceNodeForNode(v24) - if ok { - for _, item := range a.Content { - y, err := NewSchema(item, compiler.NewContext("allOf", item, context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", v25, context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = compiler.StringForScalarNode(v26) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v27) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", v28, context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", v29, context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", v30, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension 
= make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. -func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { - errors := make([]error, 0) - x := &SchemaItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", m, context)) - if matchingError == nil { - x.Oneof = &SchemaItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // FileSchema file_schema = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", m, context)) - if matchingError == nil { - x.Oneof = &SchemaItem_FileSchema{FileSchema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SchemaItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. -func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { - errors := make([]error, 0) - x := &SecurityDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSecurityDefinitionsItem additional_properties = 1; - // MAP: SecurityDefinitionsItem - x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSecurityDefinitionsItem{} - pair.Name = k - var err error - pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
-func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) { - errors := make([]error, 0) - x := &SecurityDefinitionsItem{} - matched := false - // BasicAuthenticationSecurity basic_authentication_security = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // ApiKeySecurity api_key_security = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ImplicitSecurity oauth2_implicit_security = 3; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2PasswordSecurity oauth2_password_security = 4; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2ApplicationSecurity oauth2_application_security = 5; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", m, context)) - if matchingError == nil { - x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } else { - message := fmt.Sprintf("contains an invalid SecurityDefinitionsItem") - err := compiler.NewError(context, message) - errors = []error{err} - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSecurityRequirement creates an object of type SecurityRequirement if 
possible, returning an error if not. -func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { - errors := make([]error, 0) - x := &SecurityRequirement{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedStringArray additional_properties = 1; - // MAP: StringArray - x.AdditionalProperties = make([]*NamedStringArray, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedStringArray{} - pair.Name = k - var err error - pair.Value, err = NewStringArray(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewStringArray creates an object of type StringArray if possible, returning an error if not. -func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { - errors := make([]error, 0) - x := &StringArray{} - x.Value = make([]string, 0) - for _, node := range in.Content { - s, _ := compiler.StringForScalarNode(node) - x.Value = append(x.Value, s) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTag creates an object of type Tag if possible, returning an error if not. -func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { - errors := make([]error, 0) - x := &Tag{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"name"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "externalDocs", "name"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 3; - v3 := compiler.MapValueForKey(m, "externalDocs") - if v3 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", v3, context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 
2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. -func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) { - errors := make([]error, 0) - x := &TypeItem{} - v1 := in - switch v1.Kind { - case yaml.ScalarNode: - x.Value = make([]string, 0) - x.Value = append(x.Value, v1.Value) - case yaml.SequenceNode: - x.Value = make([]string, 0) - for _, v := range v1.Content { - value := v.Value - ok := v.Kind == yaml.ScalarNode - if ok { - x.Value = append(x.Value, value) - } else { - message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) - errors = append(errors, compiler.NewError(context, message)) - } - } - default: - message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. -func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) { - errors := make([]error, 0) - x := &VendorExtension{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewXml creates an object of type Xml if possible, returning an error if not. 
-func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { - errors := make([]error, 0) - x := &Xml{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string namespace = 2; - v2 := compiler.MapValueForKey(m, "namespace") - if v2 != nil { - x.Namespace, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string prefix = 3; - v3 := compiler.MapValueForKey(m, "prefix") - if v3 != nil { - x.Prefix, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool attribute = 4; - v4 := compiler.MapValueForKey(m, "attribute") - if v4 != nil { - x.Attribute, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool wrapped = 5; - v5 := compiler.MapValueForKey(m, "wrapped") - if v5 != nil { - x.Wrapped, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, v, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
-func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) - if ok { - _, err := p.Schema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Any objects. -func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ApiKeySecurity objects. -func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. -func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside BodyParameter objects. -func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Contact objects. -func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Default objects. -func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Definitions objects. -func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Document objects. 
-func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Info != nil { - _, err := m.Info.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Paths != nil { - _, err := m.Paths.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Definitions != nil { - _, err := m.Definitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Parameters != nil { - _, err := m.Parameters.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.SecurityDefinitions != nil { - _, err := m.SecurityDefinitions.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Tags { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Examples objects. -func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ExternalDocs objects. -func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FileSchema objects. -func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Example != nil { - _, err := m.Example.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
-func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Header objects. -func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. -func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Headers objects. -func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Info objects. -func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Contact != nil { - _, err := m.Contact.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.License != nil { - _, err := m.License.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ItemsItem objects. 
-func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.Schema { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside JsonReference objects. -func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewJsonReference(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside License objects. -func (m *License) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedAny objects. -func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedHeader objects. -func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedParameter objects. -func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedPathItem objects. -func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponse objects. -func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedResponseValue objects. -func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSchema objects. 
-func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. -func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedString objects. -func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NamedStringArray objects. -func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Value != nil { - _, err := m.Value.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside NonBodyParameter objects. -func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) - if ok { - _, err := p.HeaderParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) - if ok { - _, err := p.FormDataParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) - if ok { - _, err := p.QueryParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) - if ok { - _, err := p.PathParameterSubSchema.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. -func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. -func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
-func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. -func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Scopes != nil { - _, err := m.Scopes.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Oauth2Scopes objects. -func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Operation objects. -func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.ExternalDocs != nil { - _, err := m.ExternalDocs.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - if m.Responses != nil { - _, err := m.Responses.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Security { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Parameter objects. -func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*Parameter_BodyParameter) - if ok { - _, err := p.BodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*Parameter_NonBodyParameter) - if ok { - _, err := p.NonBodyParameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParameterDefinitions objects. -func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ParametersItem objects. 
-func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - { - p, ok := m.Oneof.(*ParametersItem_Parameter) - if ok { - _, err := p.Parameter.ResolveReferences(root) - if err != nil { - return nil, err - } - } - } - { - p, ok := m.Oneof.(*ParametersItem_JsonReference) - if ok { - info, err := p.JsonReference.ResolveReferences(root) - if err != nil { - return nil, err - } else if info != nil { - n, err := NewParametersItem(info, nil) - if err != nil { - return nil, err - } else if n != nil { - *m = *n - return nil, nil - } - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathItem objects. -func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.XRef != "" { - info, err := compiler.ReadInfoForRef(root, m.XRef) - if err != nil { - return nil, err - } - if info != nil { - replacement, err := NewPathItem(info, nil) - if err == nil { - *m = *replacement - return m.ResolveReferences(root) - } - } - return info, nil - } - if m.Get != nil { - _, err := m.Get.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Put != nil { - _, err := m.Put.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Post != nil { - _, err := m.Post.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Delete != nil { - _, err := m.Delete.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Options != nil { - _, err := m.Options.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Head != nil { - _, err := m.Head.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Patch != nil { - _, err := m.Patch.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Parameters { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PathParameterSubSchema objects. -func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Paths objects. 
-func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.Path { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside PrimitivesItems objects. -func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Properties objects. -func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - for _, item := range m.AdditionalProperties { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside QueryParameterSubSchema objects. -func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Items != nil { - _, err := m.Items.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Default != nil { - _, err := m.Default.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.Enum { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside Response objects. -func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { - errors := make([]error, 0) - if m.Schema != nil { - _, err := m.Schema.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Headers != nil { - _, err := m.Headers.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - if m.Examples != nil { - _, err := m.Examples.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - for _, item := range m.VendorExtension { - if item != nil { - _, err := item.ResolveReferences(root) - if err != nil { - errors = append(errors, err) - } - } - } - return nil, compiler.NewErrorGroupOrNil(errors) -} - -// ResolveReferences resolves references found inside ResponseDefinitions objects. 
-func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside ResponseValue objects.
-func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*ResponseValue_Response)
-		if ok {
-			_, err := p.Response.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*ResponseValue_JsonReference)
-		if ok {
-			info, err := p.JsonReference.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			} else if info != nil {
-				n, err := NewResponseValue(info, nil)
-				if err != nil {
-					return nil, err
-				} else if n != nil {
-					*m = *n
-					return nil, nil
-				}
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Responses objects.
-func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.ResponseCode {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Schema objects.
-func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	if m.XRef != "" {
-		info, err := compiler.ReadInfoForRef(root, m.XRef)
-		if err != nil {
-			return nil, err
-		}
-		if info != nil {
-			replacement, err := NewSchema(info, nil)
-			if err == nil {
-				*m = *replacement
-				return m.ResolveReferences(root)
-			}
-		}
-		return info, nil
-	}
-	if m.Default != nil {
-		_, err := m.Default.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.Enum {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.AdditionalProperties != nil {
-		_, err := m.AdditionalProperties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Type != nil {
-		_, err := m.Type.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Items != nil {
-		_, err := m.Items.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.AllOf {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	if m.Properties != nil {
-		_, err := m.Properties.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Xml != nil {
-		_, err := m.Xml.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	if m.Example != nil {
-		_, err := m.Example.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SchemaItem objects.
-func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SchemaItem_Schema)
-		if ok {
-			_, err := p.Schema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SchemaItem_FileSchema)
-		if ok {
-			_, err := p.FileSchema.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitions objects.
-func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
-func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
-		if ok {
-			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
-		if ok {
-			_, err := p.ApiKeySecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
-		if ok {
-			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
-		if ok {
-			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
-		if ok {
-			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	{
-		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
-		if ok {
-			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside SecurityRequirement objects.
-func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside StringArray objects.
-func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Tag objects.
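
Editor's aside: the oneof wrappers above (ResponseValue, SchemaItem, SecurityDefinitionsItem) dispatch by type-asserting each variant in turn, and a resolved JSON reference replaces the wrapper in place via *m = *n. A rough stand-in sketch of that dispatch-and-splice shape (all types here are invented, not gnostic's):

package main

import "fmt"

type isOneof interface{ isOneof() }

type inlineValue struct{ v string }
type reference struct{ target string }

func (inlineValue) isOneof() {}
func (reference) isOneof()   {}

type wrapper struct{ Oneof isOneof }

// resolve mirrors the generated shape: type-assert each variant, and when a
// reference resolves, splice the result back into the wrapper (as *m = *n does).
func (w *wrapper) resolve() error {
	if p, ok := w.Oneof.(reference); ok {
		// pretend the lookup succeeded
		*w = wrapper{Oneof: inlineValue{v: "resolved:" + p.target}}
	}
	return nil
}

func main() {
	w := &wrapper{Oneof: reference{target: "#/definitions/Pet"}}
	_ = w.resolve()
	fmt.Printf("%+v\n", w.Oneof)
}
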
-func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	if m.ExternalDocs != nil {
-		_, err := m.ExternalDocs.ResolveReferences(root)
-		if err != nil {
-			errors = append(errors, err)
-		}
-	}
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside TypeItem objects.
-func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside VendorExtension objects.
-func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.AdditionalProperties {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ResolveReferences resolves references found inside Xml objects.
-func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) {
-	errors := make([]error, 0)
-	for _, item := range m.VendorExtension {
-		if item != nil {
-			_, err := item.ResolveReferences(root)
-			if err != nil {
-				errors = append(errors, err)
-			}
-		}
-	}
-	return nil, compiler.NewErrorGroupOrNil(errors)
-}
-
-// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
-func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node {
-	// ONE OF WRAPPER
-	// AdditionalPropertiesItem
-	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetSchema()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
-		return compiler.NewScalarNodeForBool(v1.Boolean)
-	}
-	return compiler.NewNullNode()
-}
-
-// ToRawInfo returns a description of Any suitable for JSON or YAML export.
-func (m *Any) ToRawInfo() *yaml.Node {
-	var err error
-	var node yaml.Node
-	err = yaml.Unmarshal([]byte(m.Yaml), &node)
-	if err == nil {
-		if node.Kind == yaml.DocumentNode {
-			return node.Content[0]
-		}
-		return &node
-	}
-	return compiler.NewNullNode()
-}
-
-// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
-func (m *ApiKeySecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	// always include this required field.
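
Editor's aside: Any.ToRawInfo above relies on a gopkg.in/yaml.v3 behavior worth knowing: unmarshalling into a yaml.Node yields a DocumentNode whose first Content entry is the actual value. A self-contained demonstration:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var node yaml.Node
	if err := yaml.Unmarshal([]byte("name: odo\nversion: 3"), &node); err != nil {
		panic(err)
	}
	fmt.Println(node.Kind == yaml.DocumentNode) // true
	value := node.Content[0]                    // the mapping itself, as returned by ToRawInfo
	fmt.Println(value.Kind == yaml.MappingNode, len(value.Content)) // true 4 (two key/value pairs)
}
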
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
-func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
-func (m *BodyParameter) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
-	if m.Required != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
-	info.Content = append(info.Content, m.Schema.ToRawInfo())
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
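
Editor's aside: all the ToRawInfo exporters build a YAML mapping by appending alternating key and value nodes to Content, which preserves key order. A minimal version using plain gopkg.in/yaml.v3 (str is an invented helper standing in for compiler.NewScalarNodeForString):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func str(s string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: s}
}

func main() {
	info := &yaml.Node{Kind: yaml.MappingNode}
	// keys and values interleave in Content, so output order matches append order
	info.Content = append(info.Content, str("type"), str("apiKey"))
	info.Content = append(info.Content, str("name"), str("X-API-Key"))
	info.Content = append(info.Content, str("in"), str("header"))
	out, err := yaml.Marshal(info)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
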
-func (m *Contact) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Url != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
-	}
-	if m.Email != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("email"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Default suitable for JSON or YAML export.
-func (m *Default) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
-func (m *Definitions) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Document suitable for JSON or YAML export.
-func (m *Document) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("info"))
-	info.Content = append(info.Content, m.Info.ToRawInfo())
-	if m.Host != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("host"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host))
-	}
-	if m.BasePath != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath))
-	}
-	if len(m.Schemes) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
-	}
-	if len(m.Consumes) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
-	}
-	if len(m.Produces) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("paths"))
-	info.Content = append(info.Content, m.Paths.ToRawInfo())
-	if m.Definitions != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions"))
-		info.Content = append(info.Content, m.Definitions.ToRawInfo())
-	}
-	if m.Parameters != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
-		info.Content = append(info.Content, m.Parameters.ToRawInfo())
-	}
-	if m.Responses != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
-		info.Content = append(info.Content, m.Responses.ToRawInfo())
-	}
-	if len(m.Security) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Security {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
-		info.Content = append(info.Content, items)
-	}
-	if m.SecurityDefinitions != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions"))
-		info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo())
-	}
-	if len(m.Tags) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Tags {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
-		info.Content = append(info.Content, items)
-	}
-	if m.ExternalDocs != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
-		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
-func (m *Examples) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
-func (m *ExternalDocs) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
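
Editor's aside: Document.ToRawInfo above shows the exporters' hand-rolled "omitempty": required keys are always appended, optional keys only when they differ from Go's zero value, and sequences are built as SequenceNode children. A compact standalone illustration with invented field names:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type doc struct {
	Swagger string   // required: always emitted
	Host    string   // optional: emitted only if non-empty
	Schemes []string // optional: emitted only if non-empty
}

func scalar(s string) *yaml.Node {
	return &yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Value: s}
}

func (d doc) toRawInfo() *yaml.Node {
	info := &yaml.Node{Kind: yaml.MappingNode}
	info.Content = append(info.Content, scalar("swagger"), scalar(d.Swagger))
	if d.Host != "" {
		info.Content = append(info.Content, scalar("host"), scalar(d.Host))
	}
	if len(d.Schemes) != 0 {
		seq := &yaml.Node{Kind: yaml.SequenceNode}
		for _, s := range d.Schemes {
			seq.Content = append(seq.Content, scalar(s))
		}
		info.Content = append(info.Content, scalar("schemes"), seq)
	}
	return info
}

func main() {
	out, err := yaml.Marshal(doc{Swagger: "2.0", Schemes: []string{"https"}}.toRawInfo())
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // host is omitted because it is empty
}
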
-func (m *FileSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Format != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
-	}
-	if m.Title != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.Default != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
-		info.Content = append(info.Content, m.Default.ToRawInfo())
-	}
-	if len(m.Required) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required))
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	if m.ReadOnly != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly))
-	}
-	if m.ExternalDocs != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
-		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
-	}
-	if m.Example != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("example"))
-		info.Content = append(info.Content, m.Example.ToRawInfo())
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
-func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Required != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
-	}
-	if m.In != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.AllowEmptyValue != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue))
-	}
-	if m.Type != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	}
-	if m.Format != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
-	}
-	if m.Items != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
-		info.Content = append(info.Content, m.Items.ToRawInfo())
-	}
-	if m.CollectionFormat != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
-	}
-	if m.Default != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
-		info.Content = append(info.Content, m.Default.ToRawInfo())
-	}
-	if m.Maximum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
-	}
-	if m.ExclusiveMaximum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
-	}
-	if m.Minimum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
-	}
-	if m.ExclusiveMinimum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
-	}
-	if m.MaxLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
-	}
-	if m.MinLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
-	}
-	if m.Pattern != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
-	}
-	if m.MaxItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
-	}
-	if m.MinItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
-	}
-	if m.UniqueItems != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
-	}
-	if len(m.Enum) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Enum {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
-		info.Content = append(info.Content, items)
-	}
-	if m.MultipleOf != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Header suitable for JSON or YAML export.
-func (m *Header) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	if m.Format != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
-	}
-	if m.Items != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
-		info.Content = append(info.Content, m.Items.ToRawInfo())
-	}
-	if m.CollectionFormat != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
-	}
-	if m.Default != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
-		info.Content = append(info.Content, m.Default.ToRawInfo())
-	}
-	if m.Maximum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
-	}
-	if m.ExclusiveMaximum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
-	}
-	if m.Minimum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
-	}
-	if m.ExclusiveMinimum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
-	}
-	if m.MaxLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
-	}
-	if m.MinLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
-	}
-	if m.Pattern != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
-	}
-	if m.MaxItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
-	}
-	if m.MinItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
-	}
-	if m.UniqueItems != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
-	}
-	if len(m.Enum) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Enum {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
-		info.Content = append(info.Content, items)
-	}
-	if m.MultipleOf != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
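
Editor's aside: one consequence of the zero-value checks in these exporters (e.g. `if m.Maximum != 0.0`) is that an explicitly set zero is indistinguishable from "unset" and is dropped on export. A tiny demonstration of the same test:

package main

import "fmt"

// emit mirrors the generated check: a field equal to its zero value is skipped.
func emit(maximum float64) string {
	if maximum != 0.0 {
		return fmt.Sprintf("maximum: %v\n", maximum)
	}
	return "" // an explicit `maximum: 0` disappears here
}

func main() {
	fmt.Print(emit(10)) // maximum: 10
	fmt.Print(emit(0))  // (nothing)
}
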
-func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Required != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
-	}
-	if m.In != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Type != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	}
-	if m.Format != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
-	}
-	if m.Items != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
-		info.Content = append(info.Content, m.Items.ToRawInfo())
-	}
-	if m.CollectionFormat != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
-	}
-	if m.Default != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
-		info.Content = append(info.Content, m.Default.ToRawInfo())
-	}
-	if m.Maximum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
-	}
-	if m.ExclusiveMaximum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
-	}
-	if m.Minimum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
-	}
-	if m.ExclusiveMinimum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
-	}
-	if m.MaxLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
-	}
-	if m.MinLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
-	}
-	if m.Pattern != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
-	}
-	if m.MaxItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
-	}
-	if m.MinItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
-	}
-	if m.UniqueItems != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
-	}
-	if len(m.Enum) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Enum {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
-		info.Content = append(info.Content, items)
-	}
-	if m.MultipleOf != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
-func (m *Headers) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Info suitable for JSON or YAML export.
-func (m *Info) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("title"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("version"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.TermsOfService != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService))
-	}
-	if m.Contact != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("contact"))
-		info.Content = append(info.Content, m.Contact.ToRawInfo())
-	}
-	if m.License != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("license"))
-		info.Content = append(info.Content, m.License.ToRawInfo())
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
-func (m *ItemsItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if len(m.Schema) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Schema {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("schema"))
-		info.Content = append(info.Content, items)
-	}
-	return info
-}
-
-// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
-func (m *JsonReference) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	return info
-}
-
-// ToRawInfo returns a description of License suitable for JSON or YAML export.
-func (m *License) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	if m.Url != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("url"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
-func (m *NamedAny) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Value != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
-		info.Content = append(info.Content, m.Value.ToRawInfo())
-	}
-	return info
-}
-
-// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
-func (m *NamedHeader) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
-func (m *NamedParameter) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
-func (m *NamedPathItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
-func (m *NamedResponse) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
-func (m *NamedResponseValue) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
-func (m *NamedSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
-func (m *NamedString) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Value != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value))
-	}
-	return info
-}
-
-// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
-func (m *NamedStringArray) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
-func (m *NonBodyParameter) ToRawInfo() *yaml.Node {
-	// ONE OF WRAPPER
-	// NonBodyParameter
-	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetHeaderParameterSubSchema()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetFormDataParameterSubSchema()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v2 := m.GetQueryParameterSubSchema()
-	if v2 != nil {
-		return v2.ToRawInfo()
-	}
-	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v3 := m.GetPathParameterSubSchema()
-	if v3 != nil {
-		return v3.ToRawInfo()
-	}
-	return compiler.NewNullNode()
-}
-
-// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
-func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
-	if m.Scopes != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
-		info.Content = append(info.Content, m.Scopes.ToRawInfo())
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
-	// always include this required field.
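
Editor's aside: the oneof exporters such as NonBodyParameter.ToRawInfo above try each generated getter in order and return the first non-nil variant, falling back to a null node. A stand-in sketch of that dispatch (all types and getters are invented):

package main

import "fmt"

type variantA struct{}
type variantB struct{}

func (variantA) String() string { return "A" }
func (variantB) String() string { return "B" }

type oneofWrapper struct {
	a *variantA
	b *variantB
}

func (w oneofWrapper) getA() *variantA { return w.a }
func (w oneofWrapper) getB() *variantB { return w.b }

// describe mirrors the generated dispatch: the first populated variant wins,
// and a null placeholder stands in when nothing is set.
func (w oneofWrapper) describe() string {
	if v := w.getA(); v != nil {
		return v.String()
	}
	if v := w.getB(); v != nil {
		return v.String()
	}
	return "null"
}

func main() {
	fmt.Println(oneofWrapper{b: &variantB{}}.describe()) // B
	fmt.Println(oneofWrapper{}.describe())               // null
}
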
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
-func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
-	if m.Scopes != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
-		info.Content = append(info.Content, m.Scopes.ToRawInfo())
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
-func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
-	if m.Scopes != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
-		info.Content = append(info.Content, m.Scopes.ToRawInfo())
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
-func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("flow"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow))
-	if m.Scopes != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes"))
-		info.Content = append(info.Content, m.Scopes.ToRawInfo())
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl))
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
-func (m *Oauth2Scopes) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
-	return info
-}
-
-// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
-func (m *Operation) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if len(m.Tags) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("tags"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags))
-	}
-	if m.Summary != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("summary"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.ExternalDocs != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs"))
-		info.Content = append(info.Content, m.ExternalDocs.ToRawInfo())
-	}
-	if m.OperationId != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId))
-	}
-	if len(m.Produces) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("produces"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces))
-	}
-	if len(m.Consumes) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes))
-	}
-	if len(m.Parameters) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Parameters {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
-		info.Content = append(info.Content, items)
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("responses"))
-	info.Content = append(info.Content, m.Responses.ToRawInfo())
-	if len(m.Schemes) != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes"))
-		info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes))
-	}
-	if m.Deprecated != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated))
-	}
-	if len(m.Security) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Security {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("security"))
-		info.Content = append(info.Content, items)
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
-func (m *Parameter) ToRawInfo() *yaml.Node {
-	// ONE OF WRAPPER
-	// Parameter
-	// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetBodyParameter()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetNonBodyParameter()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return compiler.NewNullNode()
-}
-
-// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
-func (m *ParameterDefinitions) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.AdditionalProperties != nil {
-		for _, item := range m.AdditionalProperties {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
-func (m *ParametersItem) ToRawInfo() *yaml.Node {
-	// ONE OF WRAPPER
-	// ParametersItem
-	// {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v0 := m.GetParameter()
-	if v0 != nil {
-		return v0.ToRawInfo()
-	}
-	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
-	v1 := m.GetJsonReference()
-	if v1 != nil {
-		return v1.ToRawInfo()
-	}
-	return compiler.NewNullNode()
-}
-
-// ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
-func (m *PathItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.XRef != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef))
-	}
-	if m.Get != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("get"))
-		info.Content = append(info.Content, m.Get.ToRawInfo())
-	}
-	if m.Put != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("put"))
-		info.Content = append(info.Content, m.Put.ToRawInfo())
-	}
-	if m.Post != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("post"))
-		info.Content = append(info.Content, m.Post.ToRawInfo())
-	}
-	if m.Delete != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("delete"))
-		info.Content = append(info.Content, m.Delete.ToRawInfo())
-	}
-	if m.Options != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("options"))
-		info.Content = append(info.Content, m.Options.ToRawInfo())
-	}
-	if m.Head != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("head"))
-		info.Content = append(info.Content, m.Head.ToRawInfo())
-	}
-	if m.Patch != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("patch"))
-		info.Content = append(info.Content, m.Patch.ToRawInfo())
-	}
-	if len(m.Parameters) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Parameters {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters"))
-		info.Content = append(info.Content, items)
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
-func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	// always include this required field.
-	info.Content = append(info.Content, compiler.NewScalarNodeForString("required"))
-	info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required))
-	if m.In != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("in"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In))
-	}
-	if m.Description != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("description"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description))
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Type != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("type"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type))
-	}
-	if m.Format != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("format"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format))
-	}
-	if m.Items != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("items"))
-		info.Content = append(info.Content, m.Items.ToRawInfo())
-	}
-	if m.CollectionFormat != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat))
-	}
-	if m.Default != nil {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("default"))
-		info.Content = append(info.Content, m.Default.ToRawInfo())
-	}
-	if m.Maximum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum))
-	}
-	if m.ExclusiveMaximum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum))
-	}
-	if m.Minimum != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum))
-	}
-	if m.ExclusiveMinimum != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum))
-	}
-	if m.MaxLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength))
-	}
-	if m.MinLength != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength))
-	}
-	if m.Pattern != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern))
-	}
-	if m.MaxItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems))
-	}
-	if m.MinItems != 0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems))
-	}
-	if m.UniqueItems != false {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems))
-	}
-	if len(m.Enum) != 0 {
-		items := compiler.NewSequenceNode()
-		for _, item := range m.Enum {
-			items.Content = append(items.Content, item.ToRawInfo())
-		}
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("enum"))
-		info.Content = append(info.Content, items)
-	}
-	if m.MultipleOf != 0.0 {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf))
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
-func (m *Paths) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.VendorExtension != nil {
-		for _, item := range m.VendorExtension {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	if m.Path != nil {
-		for _, item := range m.Path {
-			info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name))
-			info.Content = append(info.Content, item.Value.ToRawInfo())
-		}
-	}
-	return info
-}
-
-// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
-func (m *PrimitivesItems) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, 
item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. -func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.AllowEmptyValue != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. -func (m *Response) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - if m.Schema != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, m.Schema.ToRawInfo()) - } - if m.Headers != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) - info.Content = append(info.Content, m.Headers.ToRawInfo()) - } - if m.Examples != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) - info.Content = append(info.Content, m.Examples.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. 
-func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. -func (m *Responses) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. -func (m *Schema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.XRef != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Title != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if m.MaxProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) - } - if m.MinProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) - } - if len(m.Required) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.AdditionalProperties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) - info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) - } - if m.Type != nil { - if len(m.Type.Value) == 1 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) - } else { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) - } - } - if m.Items != nil { - items := compiler.NewSequenceNode() - for _, item := range m.Items.Schema { - items.Content = append(items.Content, item.ToRawInfo()) - } - if len(items.Content) == 1 { - items = items.Content[0] - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, items) - } - if len(m.AllOf) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.AllOf { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) - info.Content = 
append(info.Content, items) - } - if m.Properties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) - info.Content = append(info.Content, m.Properties.ToRawInfo()) - } - if m.Discriminator != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) - } - if m.ReadOnly != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) - } - if m.Xml != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) - info.Content = append(info.Content, m.Xml.ToRawInfo()) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.Example != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) - info.Content = append(info.Content, m.Example.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. 
-func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return compiler.NewNullNode() -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. -func (m *StringArray) ToRawInfo() *yaml.Node { - return compiler.NewSequenceNodeForStringArray(m.Value) -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. 
-func (m *TypeItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Value) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. -func (m *VendorExtension) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. -func (m *Xml) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Namespace != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) - } - if m.Prefix != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) - } - if m.Attribute != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) - } - if m.Wrapped != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go deleted file mode 100644 index 8a5f302f337..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,7342 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.26.0 -// protoc v3.15.5 -// source: openapiv2/OpenAPIv2.proto - -package openapi_v2 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type AdditionalPropertiesItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *AdditionalPropertiesItem) Reset() { - *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AdditionalPropertiesItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdditionalPropertiesItem) ProtoMessage() {} - -func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} -} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (x *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. -func (*Any) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} -} - -func (x *Any) GetValue() *anypb.Any { - if x != nil { - return x.Value - } - return nil -} - -func (x *Any) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -type ApiKeySecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ApiKeySecurity) Reset() { - *x = ApiKeySecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApiKeySecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApiKeySecurity) ProtoMessage() {} - -func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. 
-func (*ApiKeySecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} -} - -func (x *ApiKeySecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ApiKeySecurity) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ApiKeySecurity) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *ApiKeySecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BasicAuthenticationSecurity) Reset() { - *x = BasicAuthenticationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicAuthenticationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicAuthenticationSecurity) ProtoMessage() {} - -func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} -} - -func (x *BasicAuthenticationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BodyParameter) Reset() { - *x = BodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BodyParameter) ProtoMessage() {} - -func (x *BodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. -func (*BodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} -} - -func (x *BodyParameter) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BodyParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *BodyParameter) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *BodyParameter) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *BodyParameter) GetSchema() *Schema { - if x != nil { - return x.Schema - } - return nil -} - -func (x *BodyParameter) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Contact) Reset() { - *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Contact) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Contact) ProtoMessage() {} - -func (x *Contact) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Contact.ProtoReflect.Descriptor instead. 
-func (*Contact) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} -} - -func (x *Contact) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Contact) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Contact) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *Contact) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Default struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Default) Reset() { - *x = Default{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Default) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Default) ProtoMessage() {} - -func (x *Default) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Default.ProtoReflect.Descriptor instead. -func (*Default) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} -} - -func (x *Default) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -type Definitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Definitions) Reset() { - *x = Definitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Definitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Definitions) ProtoMessage() {} - -func (x *Definitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. -func (*Definitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} -} - -func (x *Definitions) GetAdditionalProperties() []*NamedSchema { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Document struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // The host (name or ip) of the API. 
Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Document) Reset() { - *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document) ProtoMessage() {} - -func (x *Document) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
-func (*Document) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} -} - -func (x *Document) GetSwagger() string { - if x != nil { - return x.Swagger - } - return "" -} - -func (x *Document) GetInfo() *Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *Document) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *Document) GetBasePath() string { - if x != nil { - return x.BasePath - } - return "" -} - -func (x *Document) GetSchemes() []string { - if x != nil { - return x.Schemes - } - return nil -} - -func (x *Document) GetConsumes() []string { - if x != nil { - return x.Consumes - } - return nil -} - -func (x *Document) GetProduces() []string { - if x != nil { - return x.Produces - } - return nil -} - -func (x *Document) GetPaths() *Paths { - if x != nil { - return x.Paths - } - return nil -} - -func (x *Document) GetDefinitions() *Definitions { - if x != nil { - return x.Definitions - } - return nil -} - -func (x *Document) GetParameters() *ParameterDefinitions { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Document) GetResponses() *ResponseDefinitions { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Document) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { - if x != nil { - return x.SecurityDefinitions - } - return nil -} - -func (x *Document) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Document) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Document) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Examples struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Examples) Reset() { - *x = Examples{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Examples) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Examples) ProtoMessage() {} - -func (x *Examples) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
-func (*Examples) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} -} - -func (x *Examples) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ExternalDocs) Reset() { - *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExternalDocs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExternalDocs) ProtoMessage() {} - -func (x *ExternalDocs) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} -} - -func (x *ExternalDocs) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ExternalDocs) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ExternalDocs) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FileSchema) Reset() { - *x = FileSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileSchema) ProtoMessage() {} - -func (x *FileSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. -func (*FileSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} -} - -func (x *FileSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FileSchema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *FileSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FileSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FileSchema) GetRequired() []string { - if x != nil { - return x.Required - } - return nil -} - -func (x *FileSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FileSchema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *FileSchema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *FileSchema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *FileSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FormDataParameterSubSchema) Reset() { - *x = FormDataParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FormDataParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormDataParameterSubSchema) ProtoMessage() {} - -func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} -} - -func (x *FormDataParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *FormDataParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FormDataParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *FormDataParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FormDataParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *FormDataParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *FormDataParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *FormDataParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Header struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 
`protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Header) Reset() { - *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Header) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Header) ProtoMessage() {} - -func (x *Header) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
-func (*Header) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} -} - -func (x *Header) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Header) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Header) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *Header) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *Header) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *Header) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Header) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Header) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Header) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Header) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Header) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Header) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Header) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Header) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Header) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Header) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Header) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Header) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Header) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *HeaderParameterSubSchema) Reset() { - *x = HeaderParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderParameterSubSchema) ProtoMessage() {} - -func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} -} - -func (x *HeaderParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *HeaderParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *HeaderParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *HeaderParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *HeaderParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *HeaderParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *HeaderParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *HeaderParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Headers struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Headers) Reset() { - *x = Headers{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Headers) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Headers) ProtoMessage() {} - -func (x *Headers) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Headers.ProtoReflect.Descriptor instead. -func (*Headers) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} -} - -func (x *Headers) GetAdditionalProperties() []*NamedHeader { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Info) Reset() { - *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Info) ProtoMessage() {} - -func (x *Info) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
-func (*Info) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} -} - -func (x *Info) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Info) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Info) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Info) GetTermsOfService() string { - if x != nil { - return x.TermsOfService - } - return "" -} - -func (x *Info) GetContact() *Contact { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Info) GetLicense() *License { - if x != nil { - return x.License - } - return nil -} - -func (x *Info) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type ItemsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` -} - -func (x *ItemsItem) Reset() { - *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ItemsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ItemsItem) ProtoMessage() {} - -func (x *ItemsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. -func (*ItemsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} -} - -func (x *ItemsItem) GetSchema() []*Schema { - if x != nil { - return x.Schema - } - return nil -} - -type JsonReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *JsonReference) Reset() { - *x = JsonReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JsonReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JsonReference) ProtoMessage() {} - -func (x *JsonReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. 
-func (*JsonReference) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} -} - -func (x *JsonReference) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *JsonReference) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -type License struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the license. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *License) Reset() { - *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *License) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*License) ProtoMessage() {} - -func (x *License) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use License.ProtoReflect.Descriptor instead. -func (*License) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} -} - -func (x *License) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *License) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *License) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedAny) Reset() { - *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedAny) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedAny) ProtoMessage() {} - -func (x *NamedAny) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. 
-func (*NamedAny) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} -} - -func (x *NamedAny) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedAny) GetValue() *Any { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedHeader) Reset() { - *x = NamedHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedHeader) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedHeader) ProtoMessage() {} - -func (x *NamedHeader) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. -func (*NamedHeader) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} -} - -func (x *NamedHeader) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedHeader) GetValue() *Header { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedParameter) Reset() { - *x = NamedParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedParameter) ProtoMessage() {} - -func (x *NamedParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. -func (*NamedParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} -} - -func (x *NamedParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedParameter) GetValue() *Parameter { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedPathItem) Reset() { - *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedPathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedPathItem) ProtoMessage() {} - -func (x *NamedPathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} -} - -func (x *NamedPathItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedPathItem) GetValue() *PathItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponse) Reset() { - *x = NamedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponse) ProtoMessage() {} - -func (x *NamedResponse) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. -func (*NamedResponse) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} -} - -func (x *NamedResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponse) GetValue() *Response { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
-type NamedResponseValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponseValue) Reset() { - *x = NamedResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponseValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponseValue) ProtoMessage() {} - -func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} -} - -func (x *NamedResponseValue) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponseValue) GetValue() *ResponseValue { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSchema) Reset() { - *x = NamedSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSchema) ProtoMessage() {} - -func (x *NamedSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. -func (*NamedSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} -} - -func (x *NamedSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSchema) GetValue() *Schema { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSecurityDefinitionsItem) Reset() { - *x = NamedSecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSecurityDefinitionsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} - -func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} -} - -func (x *NamedSecurityDefinitionsItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedString) Reset() { - *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedString) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedString) ProtoMessage() {} - -func (x *NamedString) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. -func (*NamedString) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} -} - -func (x *NamedString) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedString) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
-type NamedStringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedStringArray) Reset() { - *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedStringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedStringArray) ProtoMessage() {} - -func (x *NamedStringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} -} - -func (x *NamedStringArray) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedStringArray) GetValue() *StringArray { - if x != nil { - return x.Value - } - return nil -} - -type NonBodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (x *NonBodyParameter) Reset() { - *x = NonBodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NonBodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NonBodyParameter) ProtoMessage() {} - -func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} -} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -type Oauth2AccessCodeSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2AccessCodeSecurity) Reset() { - *x = Oauth2AccessCodeSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x 
*Oauth2AccessCodeSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} - -func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} -} - -func (x *Oauth2AccessCodeSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ApplicationSecurity) Reset() { - *x = Oauth2ApplicationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ApplicationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ApplicationSecurity) ProtoMessage() {} - -func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} -} - -func (x *Oauth2ApplicationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ImplicitSecurity) Reset() { - *x = Oauth2ImplicitSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ImplicitSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ImplicitSecurity) ProtoMessage() {} - -func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} -} - -func (x *Oauth2ImplicitSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2PasswordSecurity) Reset() { - *x = Oauth2PasswordSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2PasswordSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2PasswordSecurity) ProtoMessage() {} - -func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} -} - -func (x *Oauth2PasswordSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2PasswordSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Oauth2Scopes) Reset() { - *x = Oauth2Scopes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2Scopes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2Scopes) ProtoMessage() {} - -func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} -} - -func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` - // The transfer protocol of the API. 
- [vendored generated code elided: openapiv2/OpenAPIv2.pb.go — removal of the remaining protobuf message types (Operation, Parameter, ParameterDefinitions, ParametersItem, PathItem, PathParameterSubSchema, Paths, PrimitivesItems, Properties, QueryParameterSubSchema, Response, ResponseDefinitions, ResponseValue, Responses, Schema, SchemaItem, SecurityDefinitions, SecurityDefinitionsItem, SecurityRequirement, StringArray, Tag, TypeItem, VendorExtension, Xml), each with its Reset/String/ProtoMessage/ProtoReflect/Descriptor methods, nil-safe getters, and oneof wrapper types, followed by the beginning of the file's raw descriptor byte array (file_openapiv2_OpenAPIv2_proto_rawDesc)]
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, - 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, - 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, - 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, - 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, - 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, - 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, - 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, - 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, - 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, - 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, - 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, - 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, - 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, - 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, - 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, - 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 
0x68, - 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, - 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, - 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, - 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, - 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, - 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, - 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x08, 
0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, - 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, - 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, - 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, - 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, - 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, - 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, - 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, - 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, - 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, - 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, - 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 
0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, - 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, - 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, - 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, - 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, - 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, - 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, - 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, - 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, - 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, - 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, - 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, 0x29, 0x0a, 0x07, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, - 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, - 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, - 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, - 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, - 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, - 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, - 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, - 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, - 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, - 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, - 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, - 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, - 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, - 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, - 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, - 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, - 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, - 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, - 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, - 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, - 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, - 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, - 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, - 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, - 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, - 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, - 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, - 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, - 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, - 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, - 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, - 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, - 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, - 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, - 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, - 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, - 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, - 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, - 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, - 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x50, 0x61, 0x74, - 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, - 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, - 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, - 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, - 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, - 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, - 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, - 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, - 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, - 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, - 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, - 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, - 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, - 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, - 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, - 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, - 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, - 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, - 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, - 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, - 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, - 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, - 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, - 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, - 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, - 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 
[... elided: a large block of deleted lines from the generated Go bindings for openapiv2/OpenAPIv2.proto (package openapi.v2) — the remainder of the file_openapiv2_OpenAPIv2_proto_rawDesc raw-descriptor byte array, the file_openapiv2_OpenAPIv2_proto_rawDescGZIP helper, the goTypes slice (60 message types plus google.protobuf.Any), the depIdxs table (133 type references), and file_openapiv2_OpenAPIv2_proto_init with its per-message Exporter closures and OneofWrappers; the deleted hunk continues below ...]
[]interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, - NumEnums: 0, - NumMessages: 60, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, - DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, - MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, - }.Build() - File_openapiv2_OpenAPIv2_proto = out.File - file_openapiv2_OpenAPIv2_proto_rawDesc = nil - file_openapiv2_OpenAPIv2_proto_goTypes = nil - file_openapiv2_OpenAPIv2_proto_depIdxs = nil -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto deleted file mode 100644 index 1c59b2f4ae1..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. 
-option go_package = "./openapiv2;openapi_v2"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. - string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. - repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. - string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. 
- repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for responses -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. -message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. 
-message VendorExtension {
-  repeated NamedAny additional_properties = 1;
-}
-
-message Xml {
-  string name = 1;
-  string namespace = 2;
-  string prefix = 3;
-  bool attribute = 4;
-  bool wrapped = 5;
-  repeated NamedAny vendor_extension = 6;
-}
-
diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/README.md b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/README.md
deleted file mode 100644
index 5276128d3b8..00000000000
--- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# OpenAPI v2 Protocol Buffer Models
-
-This directory contains a Protocol Buffer-language model and related code for
-supporting OpenAPI v2.
-
-Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol
-Buffer support code for their preferred languages.
-
-OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into
-the Protocol Buffer-based datastructures generated from OpenAPIv2.proto.
-
-OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler
-generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer
-compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin.
diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/document.go b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/document.go
deleted file mode 100644
index 56e5966b4cb..00000000000
--- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/document.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 Google LLC. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package openapi_v2
-
-import (
-	"github.com/googleapis/gnostic/compiler"
-	"gopkg.in/yaml.v3"
-)
-
-// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation.
-func ParseDocument(b []byte) (*Document, error) {
-	info, err := compiler.ReadInfoFromBytes("", b)
-	if err != nil {
-		return nil, err
-	}
-	root := info.Content[0]
-	return NewDocument(root, compiler.NewContextWithExtensions("$root", root, nil, nil))
-}
-
-// YAMLValue produces a serialized YAML representation of the document.
-func (d *Document) YAMLValue(comment string) ([]byte, error) { - rawInfo := d.ToRawInfo() - rawInfo = &yaml.Node{ - Kind: yaml.DocumentNode, - Content: []*yaml.Node{rawInfo}, - HeadComment: comment, - } - return yaml.Marshal(rawInfo) -} diff --git a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json b/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json deleted file mode 100644 index afa12b79b8f..00000000000 --- a/ui/wasm/vendor/github.com/googleapis/gnostic/openapiv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. Example: '/api'." - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." 
- }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." - }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for responses" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." 
- }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - "type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": 
"#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/gorilla/mux/AUTHORS b/ui/wasm/vendor/github.com/gorilla/mux/AUTHORS deleted file mode 100644 index b722392ee59..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/AUTHORS +++ /dev/null @@ -1,8 +0,0 @@ -# This is the official list of 
gorilla/mux authors for copyright purposes. -# -# Please keep the list sorted. - -Google LLC (https://opensource.google.com/) -Kamil Kisielk -Matt Silverlock -Rodrigo Moraes (https://github.com/moraes) diff --git a/ui/wasm/vendor/github.com/gorilla/mux/LICENSE b/ui/wasm/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 6903df6386e..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/ui/wasm/vendor/github.com/gorilla/mux/README.md b/ui/wasm/vendor/github.com/gorilla/mux/README.md deleted file mode 100644 index 35eea9f1063..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,805 +0,0 @@ -# gorilla/mux - -[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) -[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) - -![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) - -https://www.gorillatoolkit.org/pkg/mux - -Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to -their respective handler. - -The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: - -* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. -* URL hosts, paths and query values can have variables with an optional regular expression. -* Registered URLs can be built, or "reversed", which helps maintaining references to resources. 
-* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. - ---- - -* [Install](#install) -* [Examples](#examples) -* [Matching Routes](#matching-routes) -* [Static Files](#static-files) -* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) -* [Registered URLs](#registered-urls) -* [Walking Routes](#walking-routes) -* [Graceful Shutdown](#graceful-shutdown) -* [Middleware](#middleware) -* [Handling CORS Requests](#handling-cors-requests) -* [Testing Handlers](#testing-handlers) -* [Full Example](#full-example) - ---- - -## Install - -With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: - -```sh -go get -u github.com/gorilla/mux -``` - -## Examples - -Let's start registering a couple of URL paths and handlers: - -```go -func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) -} -``` - -Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - -Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/products/{key}", ProductHandler) -r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - -```go -func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { - vars := mux.Vars(r) - w.WriteHeader(http.StatusOK) - fmt.Fprintf(w, "Category: %v\n", vars["category"]) -} -``` - -And this is all you need to know about the basic usage. More advanced options are explained below. - -### Matching Routes - -Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: - -```go -r := mux.NewRouter() -// Only matches if domain is "www.example.com". -r.Host("www.example.com") -// Matches a dynamic subdomain. -r.Host("{subdomain:[a-z]+}.example.com") -``` - -There are several other matchers that can be added. To match path prefixes: - -```go -r.PathPrefix("/products/") -``` - -...or HTTP methods: - -```go -r.Methods("GET", "POST") -``` - -...or URL schemes: - -```go -r.Schemes("https") -``` - -...or header values: - -```go -r.Headers("X-Requested-With", "XMLHttpRequest") -``` - -...or query values: - -```go -r.Queries("key", "value") -``` - -...or to use a custom matcher function: - -```go -r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 -}) -``` - -...and finally, it is possible to combine several matchers in a single route: - -```go -r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") -``` - -Routes are tested in the order they were added to the router. 
If two routes match, the first one wins: - -```go -r := mux.NewRouter() -r.HandleFunc("/specific", specificHandler) -r.PathPrefix("/").Handler(catchAllHandler) -``` - -Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". - -For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it: - -```go -r := mux.NewRouter() -s := r.Host("www.example.com").Subrouter() -``` - -Then register routes in the subrouter: - -```go -s.HandleFunc("/products/", ProductsHandler) -s.HandleFunc("/products/{key}", ProductHandler) -s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -``` - -The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: - -```go -r := mux.NewRouter() -s := r.PathPrefix("/products").Subrouter() -// "/products/" -s.HandleFunc("/", ProductsHandler) -// "/products/{key}/" -s.HandleFunc("/{key}/", ProductHandler) -// "/products/{key}/details" -s.HandleFunc("/{key}/details", ProductDetailsHandler) -``` - - -### Static Files - -Note that the path provided to `PathPrefix()` represents a "wildcard": calling -`PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/\*". This makes it easy to serve static files with mux: - -```go -func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Serving Single Page Applications - -Most of the time it makes sense to serve your SPA on a separate web server from your API, -but sometimes it's desirable to serve them both from one place. It's possible to write a simple -handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage -mux's powerful routing for your API endpoints. - -```go -package main - -import ( - "encoding/json" - "log" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/gorilla/mux" -) - -// spaHandler implements the http.Handler interface, so we can use it -// to respond to HTTP requests. The path to the static directory and -// path to the index file within that static directory are used to -// serve the SPA in the given static directory. -type spaHandler struct { - staticPath string - indexPath string -} - -// ServeHTTP inspects the URL path to locate a file within the static dir -// on the SPA handler. 
If a file is found, it will be served. If not, the -// file located at the index path on the SPA handler will be served. This -// is suitable behavior for serving an SPA (single page application). -func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - // get the absolute path to prevent directory traversal - path, err := filepath.Abs(r.URL.Path) - if err != nil { - // if we failed to get the absolute path respond with a 400 bad request - // and stop - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - // prepend the path with the path to the static directory - path = filepath.Join(h.staticPath, path) - - // check whether a file exists at the given path - _, err = os.Stat(path) - if os.IsNotExist(err) { - // file does not exist, serve index.html - http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) - return - } else if err != nil { - // if we got an error (that wasn't that the file doesn't exist) stating the - // file, return a 500 internal server error and stop - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // otherwise, use http.FileServer to serve the static dir - http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) -} - -func main() { - router := mux.NewRouter() - - router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) { - // an example API handler - json.NewEncoder(w).Encode(map[string]bool{"ok": true}) - }) - - spa := spaHandler{staticPath: "build", indexPath: "index.html"} - router.PathPrefix("/").Handler(spa) - - srv := &http.Server{ - Handler: router, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) -} -``` - -### Registered URLs - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - -```go -r := mux.NewRouter() -r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") -``` - -To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - -```go -url, err := r.Get("article").URL("category", "technology", "id", "42") -``` - -...and the result will be a `url.URL` with the following path: - -``` -"/articles/technology/42" -``` - -This also works for host and query value variables: - -```go -r := mux.NewRouter() -r.Host("{subdomain}.example.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - -// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") -``` - -All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. 
For example, we could do: - -```go -r.HeadersRegexp("Content-Type", "application/(text|json)") -``` - -...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - -There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: - -```go -// "http://news.example.com/" -host, err := r.Get("article").URLHost("subdomain", "news") - -// "/articles/technology/42" -path, err := r.Get("article").URLPath("category", "technology", "id", "42") -``` - -And if you use subrouters, host and path defined separately can be built as well: - -```go -r := mux.NewRouter() -s := r.Host("{subdomain}.example.com").Subrouter() -s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - -// "http://news.example.com/articles/technology/42" -url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -``` - -### Walking Routes - -The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example, -the following prints all of the registered routes: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - pathTemplate, err := route.GetPathTemplate() - if err == nil { - fmt.Println("ROUTE:", pathTemplate) - } - pathRegexp, err := route.GetPathRegexp() - if err == nil { - fmt.Println("Path regexp:", pathRegexp) - } - queriesTemplates, err := route.GetQueriesTemplates() - if err == nil { - fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) - } - queriesRegexps, err := route.GetQueriesRegexp() - if err == nil { - fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) - } - methods, err := route.GetMethods() - if err == nil { - fmt.Println("Methods:", strings.Join(methods, ",")) - } - fmt.Println() - return nil - }) - - if err != nil { - fmt.Println(err) - } - - http.Handle("/", r) -} -``` - -### Graceful Shutdown - -Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`: - -```go -package main - -import ( - "context" - "flag" - "log" - "net/http" - "os" - "os/signal" - "time" - - "github.com/gorilla/mux" -) - -func main() { - var wait time.Duration - flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m") - flag.Parse() - - r := mux.NewRouter() - // Add your routes as needed - - srv := &http.Server{ - Addr: "0.0.0.0:8080", - // Good practice to set timeouts to avoid Slowloris attacks. - WriteTimeout: time.Second * 15, - ReadTimeout: time.Second * 15, - IdleTimeout: time.Second * 60, - Handler: r, // Pass our instance of gorilla/mux in. - } - - // Run our server in a goroutine so that it doesn't block. 
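// Editor's sketch, not part of the original example: after srv.Shutdown(ctx)
// is called further down, ListenAndServe returns http.ErrServerClosed, so the
// log.Println below will also report that sentinel error. A common refinement
// is to treat it as a normal exit:
//
//	if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
//		log.Println(err)
//	}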
- go func() { - if err := srv.ListenAndServe(); err != nil { - log.Println(err) - } - }() - - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - <-c - - // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(context.Background(), wait) - defer cancel() - // Doesn't block if no connections, but will otherwise wait - // until the timeout deadline. - srv.Shutdown(ctx) - // Optionally, you could run srv.Shutdown in a goroutine and block on - // <-ctx.Done() if your application should wait for other services - // to finalize based on context cancellation. - log.Println("shutting down") - os.Exit(0) -} -``` - -### Middleware - -Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters. -Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking. - -Mux middlewares are defined using the de facto standard type: - -```go -type MiddlewareFunc func(http.Handler) http.Handler -``` - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers. - -A very basic middleware which logs the URI of the request being handled could be written as: - -```go -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. 
- next.ServeHTTP(w, r) - }) -} -``` - -Middlewares can be added to a router using `Router.Use()`: - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.Use(loggingMiddleware) -``` - -A more complex authentication middleware, which maps session token to users, could be written as: - -```go -// Define our struct -type authenticationMiddleware struct { - tokenUsers map[string]string -} - -// Initialize it somewhere -func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" -} - -// Middleware function, which will be called for each request -func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - // Pass down the request to the next middleware (or final handler) - next.ServeHTTP(w, r) - } else { - // Write an error and stop the handler chain - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) -} -``` - -```go -r := mux.NewRouter() -r.HandleFunc("/", handler) - -amw := authenticationMiddleware{} -amw.Populate() - -r.Use(amw.Middleware) -``` - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. - -### Handling CORS Requests - -[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header. - -* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin` -* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route -* If you do not specify any methods, then: -> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers. - -Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers: - -```go -package main - -import ( - "net/http" - "github.com/gorilla/mux" -) - -func main() { - r := mux.NewRouter() - - // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers - r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions) - r.Use(mux.CORSMethodMiddleware(r)) - - http.ListenAndServe(":8080", r) -} - -func fooHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Access-Control-Allow-Origin", "*") - if r.Method == http.MethodOptions { - return - } - - w.Write([]byte("foo")) -} -``` - -And an request to `/foo` using something like: - -```bash -curl localhost:8080/foo -v -``` - -Would look like: - -```bash -* Trying ::1... 
-* TCP_NODELAY set -* Connected to localhost (::1) port 8080 (#0) -> GET /foo HTTP/1.1 -> Host: localhost:8080 -> User-Agent: curl/7.59.0 -> Accept: */* -> -< HTTP/1.1 200 OK -< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS -< Access-Control-Allow-Origin: * -< Date: Fri, 28 Jun 2019 20:13:30 GMT -< Content-Length: 3 -< Content-Type: text/plain; charset=utf-8 -< -* Connection #0 to host localhost left intact -foo -``` - -### Testing Handlers - -Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. - -First, our simple HTTP handler: - -```go -// endpoints.go -package main - -func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { - // A very simple health check. - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - // In the future we could report back on the status of our DB, or our cache - // (e.g. Redis) by performing a simple PING, and include them in the response. - io.WriteString(w, `{"alive": true}`) -} - -func main() { - r := mux.NewRouter() - r.HandleFunc("/health", HealthCheckHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test code: - -```go -// endpoints_test.go -package main - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -func TestHealthCheckHandler(t *testing.T) { - // Create a request to pass to our handler. We don't have any query parameters for now, so we'll - // pass 'nil' as the third parameter. - req, err := http.NewRequest("GET", "/health", nil) - if err != nil { - t.Fatal(err) - } - - // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. - rr := httptest.NewRecorder() - handler := http.HandlerFunc(HealthCheckHandler) - - // Our handlers satisfy http.Handler, so we can call their ServeHTTP method - // directly and pass in our Request and ResponseRecorder. - handler.ServeHTTP(rr, req) - - // Check the status code is what we expect. - if status := rr.Code; status != http.StatusOK { - t.Errorf("handler returned wrong status code: got %v want %v", - status, http.StatusOK) - } - - // Check the response body is what we expect. - expected := `{"alive": true}` - if rr.Body.String() != expected { - t.Errorf("handler returned unexpected body: got %v want %v", - rr.Body.String(), expected) - } -} -``` - -In the case that our routes have [variables](#examples), we can pass those in the request. We could write -[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple -possible route variables as needed. 
- -```go -// endpoints.go -func main() { - r := mux.NewRouter() - // A route with a route variable: - r.HandleFunc("/metrics/{type}", MetricsHandler) - - log.Fatal(http.ListenAndServe("localhost:8080", r)) -} -``` - -Our test file, with a table-driven test of `routeVariables`: - -```go -// endpoints_test.go -func TestMetricsHandler(t *testing.T) { - tt := []struct{ - routeVariable string - shouldPass bool - }{ - {"goroutines", true}, - {"heap", true}, - {"counters", true}, - {"queries", true}, - {"adhadaeqm3k", false}, - } - - for _, tc := range tt { - path := fmt.Sprintf("/metrics/%s", tc.routeVariable) - req, err := http.NewRequest("GET", path, nil) - if err != nil { - t.Fatal(err) - } - - rr := httptest.NewRecorder() - - // Need to create a router that we can pass the request through so that the vars will be added to the context - router := mux.NewRouter() - router.HandleFunc("/metrics/{type}", MetricsHandler) - router.ServeHTTP(rr, req) - - // In this case, our MetricsHandler returns a non-200 response - // for a route variable it doesn't know about. - if rr.Code == http.StatusOK && !tc.shouldPass { - t.Errorf("handler should have failed on routeVariable %s: got %v want %v", - tc.routeVariable, rr.Code, http.StatusOK) - } - } -} -``` - -## Full Example - -Here's a complete, runnable example of a small `mux` based server: - -```go -package main - -import ( - "net/http" - "log" - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - log.Fatal(http.ListenAndServe(":8000", r)) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. diff --git a/ui/wasm/vendor/github.com/gorilla/mux/doc.go b/ui/wasm/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index bd5a38b55d8..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. 
- -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. 
You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. 
For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.Use(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", http.StatusForbidden) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := authenticationMiddleware{tokenUsers: make(map[string]string)} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. 
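As an illustration of that contract (an editor's sketch; the requireJSON
name and the Content-Type check are hypothetical, not part of the original
gorilla/mux documentation), a middleware that terminates some requests
writes the response itself and returns, and otherwise passes the request on
without writing anything:

	func requireJSON(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.Header.Get("Content-Type") != "application/json" {
				// Terminating the request: write to the ResponseWriter
				// and do not call next.ServeHTTP().
				http.Error(w, "Unsupported Media Type", http.StatusUnsupportedMediaType)
				return
			}
			// Not terminating: write nothing here; hand the request to
			// the next middleware or the final handler.
			next.ServeHTTP(w, r)
		})
	}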
- -*/ -package mux diff --git a/ui/wasm/vendor/github.com/gorilla/mux/middleware.go b/ui/wasm/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index cb51c565ebd..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,74 +0,0 @@ -package mux - -import ( - "net/http" - "strings" -) - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// Middleware allows MiddlewareFunc to implement the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf ...MiddlewareFunc) { - for _, fn := range mwf { - r.middlewares = append(r.middlewares, fn) - } -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} - -// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header -// on requests for routes that have an OPTIONS method matcher to all the method matchers on -// the route. Routes that do not explicitly handle OPTIONS requests will not be processed -// by the middleware. See examples for usage. -func CORSMethodMiddleware(r *Router) MiddlewareFunc { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - allMethods, err := getAllMethodsForRoute(r, req) - if err == nil { - for _, v := range allMethods { - if v == http.MethodOptions { - w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) - } - } - } - - next.ServeHTTP(w, req) - }) - } -} - -// getAllMethodsForRoute returns all the methods from method matchers matching a given -// request. -func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { - var allMethods []string - - for _, route := range r.routes { - var match RouteMatch - if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { - methods, err := route.GetMethods() - if err != nil { - return nil, err - } - - allMethods = append(allMethods, methods...) - } - } - - return allMethods, nil -} diff --git a/ui/wasm/vendor/github.com/gorilla/mux/mux.go b/ui/wasm/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index 782a34b22a6..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,606 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - // ErrMethodMismatch is returned when the method in the request does not match - // the method defined against the route. - ErrMethodMismatch = errors.New("method is not allowed") - // ErrNotFound is returned when no route match is found. - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route)} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - MethodNotAllowedHandler http.Handler - - // Routes to be matched, in order. - routes []*Route - - // Routes by name for URL building. - namedRoutes map[string]*Route - - // If true, do not clear the request context after handling the request. - // - // Deprecated: No effect, since the context is stored on the request itself. - KeepContext bool - - // Slice of middlewares to be called after a match is found - middlewares []middleware - - // configuration shared with `Route` - routeConf -} - -// common route configuration shared between `Router` and `Route` -type routeConf struct { - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - - // Manager for the variables from host and path. - regexp routeRegexpGroup - - // List of matchers. - matchers []matcher - - // The scheme used when building URLs. - buildScheme string - - buildVarsFunc BuildVarsFunc -} - -// returns an effective deep copy of `routeConf` -func copyRouteConf(r routeConf) routeConf { - c := r - - if r.regexp.path != nil { - c.regexp.path = copyRouteRegexp(r.regexp.path) - } - - if r.regexp.host != nil { - c.regexp.host = copyRouteRegexp(r.regexp.host) - } - - c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries)) - for _, q := range r.regexp.queries { - c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q)) - } - - c.matchers = make([]matcher, len(r.matchers)) - copy(c.matchers, r.matchers) - - return c -} - -func copyRouteRegexp(r *routeRegexp) *routeRegexp { - c := *r - return &c -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. 
If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } - - return false - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. - if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = requestWithVars(req, match.Vars) - req = requestWithRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.namedRoutes[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.namedRoutes[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. 
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - // initialize a route with a copy of the parent router's configuration - route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.routes = append(r.routes, route) - return route -} - -// Name registers a new route with a name. -// See Route.Name(). -func (r *Router) Name(name string) *Route { - return r.NewRoute().Name(name) -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). 
-func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := r.Context().Value(varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns. -func CurrentRoute(r *http.Request) *Route { - if rv := r.Context().Value(routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func requestWithVars(r *http.Request, vars map[string]string) *http.Request { - ctx := context.WithValue(r.Context(), varsKey, vars) - return r.WithContext(ctx) -} - -func requestWithRoute(r *http.Request, route *Route) *http.Request { - ctx := context.WithValue(r.Context(), routeKey, route) - return r.WithContext(ctx) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. 
-// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. 
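The cleanPath helper above is net/http's canonicalization plus trailing-slash preservation. A standalone sketch of the same rule (the function name clean is hypothetical; the logic mirrors the removed helper):

```go
package main

import (
	"fmt"
	"path"
)

// clean reproduces the helper's behavior: path.Clean, plus restoring a
// trailing slash that Clean strips (except for the root "/").
func clean(p string) string {
	if p == "" {
		return "/"
	}
	if p[0] != '/' {
		p = "/" + p
	}
	np := path.Clean(p)
	if p[len(p)-1] == '/' && np != "/" {
		np += "/"
	}
	return np
}

func main() {
	fmt.Println(clean("/a/b/../c/")) // "/a/c/"  (".." resolved, trailing slash kept)
	fmt.Println(clean("a//b"))       // "/a/b"   (leading slash added, "//" collapsed)
}
```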
-func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/ui/wasm/vendor/github.com/gorilla/mux/regexp.go b/ui/wasm/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 0144842bb23..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. 
- raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - - var wildcardHostPort bool - if typ == regexpTypeHost { - if !strings.Contains(pattern.String(), ":") { - wildcardHostPort = true - } - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! - return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - wildcardHostPort: wildcardHostPort, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp - // Wildcard host-port (no strict port match in hostname) - wildcardHostPort bool -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType == regexpTypeHost { - host := getHost(req) - if r.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - return r.regexp.MatchString(host) - } - - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. 
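To make the template parsing above concrete: for a path template such as /articles/{category}/{id:[0-9]+}, newRouteRegexp quotes the literal segments with regexp.QuoteMeta, names each variable group v0, v1, ... (see varGroupName below), and falls back to the default pattern [^/]+ when a variable gives none. A sketch of the resulting expression:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Equivalent of what newRouteRegexp builds for the path template
	// "/articles/{category}/{id:[0-9]+}": anchored at both ends, one
	// named capturing group per variable.
	re := regexp.MustCompile(`^/articles/(?P<v0>[^/]+)/(?P<v1>[0-9]+)$`)

	m := re.FindStringSubmatch("/articles/technology/42")
	fmt.Println(m[1], m[2]) // technology 42
}
```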
-func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) - if ok { - return templateKey + "=" + val - } - return "" -} - -// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. -// If key was not found, empty string and false is returned. -func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { - query := []byte(rawQuery) - for len(query) > 0 { - foundKey := query - if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { - foundKey, query = foundKey[:i], foundKey[i+1:] - } else { - query = query[:0] - } - if len(foundKey) == 0 { - continue - } - var value []byte - if i := bytes.IndexByte(foundKey, '='); i >= 0 { - foundKey, value = foundKey[:i], foundKey[i+1:] - } - if len(foundKey) < len(key) { - // Cannot possibly be key. - continue - } - keyString, err := url.QueryUnescape(string(foundKey)) - if err != nil { - continue - } - if keyString != key { - continue - } - valueString, err := url.QueryUnescape(string(value)) - if err != nil { - continue - } - return valueString, true - } - return "", false -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - if v.host.wildcardHostPort { - // Don't be strict on the port match - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - } - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. 
- if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -// According to section 14.23 of RFC 2616 the Host header -// can include the port number if the default value of 80 is not used. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - return r.Host -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/ui/wasm/vendor/github.com/gorilla/mux/route.go b/ui/wasm/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 750afe570d0..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Request handler for the route. - handler http.Handler - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - // "global" reference to all named routes - namedRoutes map[string]*Route - - // config possibly passed in from `Router` - routeConf -} - -// SkipClean reports whether path cleaning is enabled for this route via -// Router.SkipClean. -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - - // Ignore ErrNotFound errors. These errors arise from match call - // to Subrouters. - // - // This prevents subsequent matching subrouters from failing to - // run middleware. If not ignored, the middleware would see a - // non-nil MatchErr and be skipped, even when there was a - // matching route. - if match.MatchErr == ErrNotFound { - match.MatchErr = nil - } - - matchErr = nil - return false - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch && r.handler != nil { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. 
- if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. - r.regexp.setMatch(req, match, r) - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// It is an error to call Name more than once on a route. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.namedRoutes[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. 
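The Name mechanics above feed reverse URL building through the shared namedRoutes map. A short sketch using Router.Get to retrieve a named route (the route and variable values are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{category}/{id:[0-9]+}",
		func(http.ResponseWriter, *http.Request) {}).
		Name("article") // naming twice would set the route's err field

	// The name is the handle for reverse URL building later on.
	u, err := r.Get("article").URL("category", "technology", "id", "42")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /articles/technology/42
}
```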
-func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
-	if r.err != nil {
-		return r.err
-	}
-	if typ == regexpTypePath || typ == regexpTypePrefix {
-		if len(tpl) > 0 && tpl[0] != '/' {
-			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
-		}
-		if r.regexp.path != nil {
-			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
-		}
-	}
-	rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
-		strictSlash:    r.strictSlash,
-		useEncodedPath: r.useEncodedPath,
-	})
-	if err != nil {
-		return err
-	}
-	for _, q := range r.regexp.queries {
-		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
-			return err
-		}
-	}
-	if typ == regexpTypeHost {
-		if r.regexp.path != nil {
-			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
-				return err
-			}
-		}
-		r.regexp.host = rr
-	} else {
-		if r.regexp.host != nil {
-			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
-				return err
-			}
-		}
-		if typ == regexpTypeQuery {
-			r.regexp.queries = append(r.regexp.queries, rr)
-		} else {
-			r.regexp.path = rr
-		}
-	}
-	r.addMatcher(rr)
-	return nil
-}
-
-// Headers --------------------------------------------------------------------
-
-// headerMatcher matches the request against header values.
-type headerMatcher map[string]string
-
-func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchMapWithString(m, r.Header, true)
-}
-
-// Headers adds a matcher for request header values.
-// It accepts a sequence of key/value pairs to be matched. For example:
-//
-//     r := mux.NewRouter()
-//     r.Headers("Content-Type", "application/json",
-//               "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if both request header values match.
-// If the value is an empty string, it will match any value if the key is set.
-func (r *Route) Headers(pairs ...string) *Route {
-	if r.err == nil {
-		var headers map[string]string
-		headers, r.err = mapFromPairsToString(pairs...)
-		return r.addMatcher(headerMatcher(headers))
-	}
-	return r
-}
-
-// headerRegexMatcher matches the request against the route given a regex for the header
-type headerRegexMatcher map[string]*regexp.Regexp
-
-func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	return matchMapWithRegex(m, r.Header, true)
-}
-
-// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
-// support. For example:
-//
-//     r := mux.NewRouter()
-//     r.HeadersRegexp("Content-Type", "application/(text|json)",
-//               "X-Requested-With", "XMLHttpRequest")
-//
-// The above route will only match if the request headers match both regular expressions.
-// If the value is an empty string, it will match any value if the key is set.
-// Use the start and end of string anchors (^ and $) to match an exact value.
-func (r *Route) HeadersRegexp(pairs ...string) *Route {
-	if r.err == nil {
-		var headers map[string]*regexp.Regexp
-		headers, r.err = mapFromPairsToRegex(pairs...)
-		return r.addMatcher(headerRegexMatcher(headers))
-	}
-	return r
-}
-
-// Host -----------------------------------------------------------------------
-
-// Host adds a matcher for the URL host.
-// It accepts a template with zero or more URL variables enclosed by {}.
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next dot.
-//
-// - {name:pattern} matches the given regexp pattern.
-// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. 
-//
-// Variables can define an optional regexp pattern to be matched:
-//
-// - {name} matches anything until the next slash.
-//
-// - {name:pattern} matches the given regexp pattern.
-func (r *Route) Queries(pairs ...string) *Route {
-	length := len(pairs)
-	if length%2 != 0 {
-		r.err = fmt.Errorf(
-			"mux: number of parameters must be multiple of 2, got %v", pairs)
-		return nil
-	}
-	for i := 0; i < length; i += 2 {
-		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
-			return r
-		}
-	}
-
-	return r
-}
-
-// Schemes --------------------------------------------------------------------
-
-// schemeMatcher matches the request against URL schemes.
-type schemeMatcher []string
-
-func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
-	scheme := r.URL.Scheme
-	// https://golang.org/pkg/net/http/#Request
-	// "For [most] server requests, fields other than Path and RawQuery will be
-	// empty."
-	// Since we're an http muxer, the scheme is either going to be http or https
-	// though, so we can just set it based on the tls termination state.
-	if scheme == "" {
-		if r.TLS == nil {
-			scheme = "http"
-		} else {
-			scheme = "https"
-		}
-	}
-	return matchInArray(m, scheme)
-}
-
-// Schemes adds a matcher for URL schemes.
-// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
-// If the request's URL has a scheme set, it will be matched against.
-// Generally, the URL scheme will only be set if a previous handler set it,
-// such as the ProxyHeaders handler from gorilla/handlers.
-// If unset, the scheme will be determined based on the request's TLS
-// termination state.
-// The first argument to Schemes will be used when constructing a route URL.
-func (r *Route) Schemes(schemes ...string) *Route {
-	for k, v := range schemes {
-		schemes[k] = strings.ToLower(v)
-	}
-	if len(schemes) > 0 {
-		r.buildScheme = schemes[0]
-	}
-	return r.addMatcher(schemeMatcher(schemes))
-}
-
-// BuildVarsFunc --------------------------------------------------------------
-
-// BuildVarsFunc is the function signature used by custom build variable
-// functions (which can modify route variables before a route's URL is built).
-type BuildVarsFunc func(map[string]string) map[string]string
-
-// BuildVarsFunc adds a custom function to be used to modify build variables
-// before a route's URL is built.
-func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
-	if r.buildVarsFunc != nil {
-		// compose the old and new functions
-		old := r.buildVarsFunc
-		r.buildVarsFunc = func(m map[string]string) map[string]string {
-			return f(old(m))
-		}
-	} else {
-		r.buildVarsFunc = f
-	}
-	return r
-}
-
-// Subrouter ------------------------------------------------------------------
-
-// Subrouter creates a subrouter for the route.
-//
-// It will test the inner routes only if the parent route matched. For example:
-//
-//     r := mux.NewRouter()
-//     s := r.Host("www.example.com").Subrouter()
-//     s.HandleFunc("/products/", ProductsHandler)
-//     s.HandleFunc("/products/{key}", ProductHandler)
-//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
-//
-// Here, the routes registered in the subrouter won't be tested if the host
-// doesn't match.
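The BuildVarsFunc registration above composes rather than replaces: the new function wraps the previously registered one, so functions run in registration order when a URL is built. A sketch (the path and the two transformations are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	route := r.HandleFunc("/files/{name}", func(http.ResponseWriter, *http.Request) {}).
		Name("file")

	route.BuildVarsFunc(func(m map[string]string) map[string]string {
		m["name"] = m["name"] + ".txt" // registered first, runs first
		return m
	})
	route.BuildVarsFunc(func(m map[string]string) map[string]string {
		m["name"] = "v1-" + m["name"] // registered second, sees the ".txt" suffix
		return m
	})

	u, _ := r.Get("file").URL("name", "report")
	fmt.Println(u.Path) // /files/v1-report.txt
}
```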
-func (r *Route) Subrouter() *Router { - // initialize a subrouter with a copy of the parent route's configuration - router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Host("{subdomain}.domain.com"). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// The scheme of the resulting url will be the first argument that was passed to Schemes: -// -// // url.String() will be "https://example.com" -// r := mux.NewRouter() -// url, err := r.Host("example.com") -// .Schemes("https", "http").URL() -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if r.buildScheme != "" { - scheme = r.buildScheme - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if r.buildScheme != "" { - u.Scheme = r.buildScheme - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) 
- if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not have queries. -func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define queries. -func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - queries := make([]string, 0, len(r.regexp.queries)) - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, errors.New("mux: route doesn't have methods") -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. -func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// prepareVars converts the route variable pairs into a map. 
If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} diff --git a/ui/wasm/vendor/github.com/gorilla/mux/test_helpers.go b/ui/wasm/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 5f5c496de01..00000000000 --- a/ui/wasm/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. Arguments are not modified, a shallow -// copy is returned. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. -func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return requestWithVars(r, val) -} diff --git a/ui/wasm/vendor/github.com/gregjones/httpcache/.travis.yml b/ui/wasm/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index b5ffbe03d86..00000000000 --- a/ui/wasm/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false -language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - master -matrix: - allow_failures: - - go: master - fast_finish: true -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . - - go test -v -race ./... diff --git a/ui/wasm/vendor/github.com/gregjones/httpcache/LICENSE.txt b/ui/wasm/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316beb0cb..00000000000 --- a/ui/wasm/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/ui/wasm/vendor/github.com/gregjones/httpcache/README.md b/ui/wasm/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 09c9e7c173a..00000000000 --- a/ui/wasm/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,25 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. -- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). 
- -License -------- - -- [MIT License](LICENSE.txt) diff --git a/ui/wasm/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/ui/wasm/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129d823..00000000000 --- a/ui/wasm/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/ui/wasm/vendor/github.com/gregjones/httpcache/httpcache.go b/ui/wasm/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index f6a2ec4a53e..00000000000 --- a/ui/wasm/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. 
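Any implementation of the three-method Cache interface above can back the Transport. A sketch wiring in the diskv-backed disk cache from this same removed tree (the /tmp path and the URL are illustrative):

```go
package main

import (
	"io"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Any Get/Set/Delete implementation satisfies Cache; diskcache
	// persists entries as md5-named files under the base path.
	cache := diskcache.New("/tmp/httpcache")
	client := httpcache.NewTransport(cache).Client()

	resp, err := client.Get("https://example.com/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// GET bodies are cached lazily: the entry is written when the body
	// reaches EOF, so it must be read fully.
	io.Copy(io.Discard, resp.Body)
}
```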
-func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
-	cachedVal, ok := c.Get(cacheKey(req))
-	if !ok {
-		return
-	}
-
-	b := bytes.NewBuffer(cachedVal)
-	return http.ReadResponse(bufio.NewReader(b), req)
-}
-
-// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
-type MemoryCache struct {
-	mu    sync.RWMutex
-	items map[string][]byte
-}
-
-// Get returns the []byte representation of the response and true if present, false if not
-func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
-	c.mu.RLock()
-	resp, ok = c.items[key]
-	c.mu.RUnlock()
-	return resp, ok
-}
-
-// Set saves response resp to the cache with key
-func (c *MemoryCache) Set(key string, resp []byte) {
-	c.mu.Lock()
-	c.items[key] = resp
-	c.mu.Unlock()
-}
-
-// Delete removes key from the cache
-func (c *MemoryCache) Delete(key string) {
-	c.mu.Lock()
-	delete(c.items, key)
-	c.mu.Unlock()
-}
-
-// NewMemoryCache returns a new Cache that will store items in an in-memory map
-func NewMemoryCache() *MemoryCache {
-	c := &MemoryCache{items: map[string][]byte{}}
-	return c
-}
-
-// Transport is an implementation of http.RoundTripper that will return values from a cache
-// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
-// to repeated requests allowing servers to return 304 / Not Modified
-type Transport struct {
-	// The RoundTripper interface actually used to make requests
-	// If nil, http.DefaultTransport is used
-	Transport http.RoundTripper
-	Cache     Cache
-	// If true, responses returned from the cache will be given an extra header, X-From-Cache
-	MarkCachedResponses bool
-}
-
-// NewTransport returns a new Transport with the
-// provided Cache implementation and MarkCachedResponses set to true
-func NewTransport(c Cache) *Transport {
-	return &Transport{Cache: c, MarkCachedResponses: true}
-}
-
-// Client returns an *http.Client that caches responses.
-func (t *Transport) Client() *http.Client {
-	return &http.Client{Transport: t}
-}
-
-// varyMatches will return false unless all of the cached values for the headers listed in Vary
-// match the new request
-func varyMatches(cachedResp *http.Response, req *http.Request) bool {
-	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
-		header = http.CanonicalHeaderKey(header)
-		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
-			return false
-		}
-	}
-	return true
-}
-
-// RoundTrip takes a Request and returns a Response
-//
-// If there is a fresh Response already in cache, then it will be returned without connecting to
-// the server.
-//
-// If there is a stale Response, then any validators it contains will be set on the new request
-// to give the server a chance to respond with NotModified. If this happens, then the cached Response
-// will be returned.
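A sketch of the observable behavior described above, using the in-memory transport and the XFromCache marker (the test server and its Cache-Control value are illustrative):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/gregjones/httpcache"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Cache-Control", "max-age=60")
		fmt.Fprint(w, "hello")
	}))
	defer srv.Close()

	client := httpcache.NewMemoryCacheTransport().Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		io.Copy(io.Discard, resp.Body) // entry is stored once the body hits EOF
		resp.Body.Close()
		fmt.Println(i, resp.Header.Get(httpcache.XFromCache))
	}
	// Prints "0 " then "1 1": the second response is served from the cache.
}
```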
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
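The freshness computation in getFreshness below reduces, in the simple case, to comparing a lifetime (from max-age or Expires) against the response's current age (now minus the Date header). A worked sketch of that arithmetic (the 30-second-old response and max-age=60 are assumptions):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// A response dated 30 seconds ago with "Cache-Control: max-age=60".
	h := http.Header{}
	h.Set("Date", time.Now().UTC().Add(-30*time.Second).Format(time.RFC1123))

	date, _ := time.Parse(time.RFC1123, h.Get("Date"))
	currentAge := time.Since(date)

	// getFreshness derives the lifetime the same way: maxAge + "s".
	lifetime, _ := time.ParseDuration("60" + "s")

	fmt.Println(lifetime > currentAge) // true: still fresh for ~30 more seconds
}
```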
-func Date(respHeaders http.Header) (date time.Time, err error) {
-	dateHeader := respHeaders.Get("date")
-	if dateHeader == "" {
-		err = ErrNoDateHeader
-		return
-	}
-
-	return time.Parse(time.RFC1123, dateHeader)
-}
-
-type realClock struct{}
-
-func (c *realClock) since(d time.Time) time.Duration {
-	return time.Since(d)
-}
-
-type timer interface {
-	since(d time.Time) time.Duration
-}
-
-var clock timer = &realClock{}
-
-// getFreshness will return one of fresh/stale/transparent based on the cache-control
-// values of the request and the response
-//
-// fresh indicates the response can be returned
-// stale indicates that the response needs validating before it is returned
-// transparent indicates the response should not be used to fulfil the request
-//
-// Because this is only a private cache, 'public' and 'private' in cache-control aren't
-// significant. Similarly, s-maxage isn't used.
-func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
-	respCacheControl := parseCacheControl(respHeaders)
-	reqCacheControl := parseCacheControl(reqHeaders)
-	if _, ok := reqCacheControl["no-cache"]; ok {
-		return transparent
-	}
-	if _, ok := respCacheControl["no-cache"]; ok {
-		return stale
-	}
-	if _, ok := reqCacheControl["only-if-cached"]; ok {
-		return fresh
-	}
-
-	date, err := Date(respHeaders)
-	if err != nil {
-		return stale
-	}
-	currentAge := clock.since(date)
-
-	var lifetime time.Duration
-	var zeroDuration time.Duration
-
-	// If a response includes both an Expires header and a max-age directive,
-	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
-	if maxAge, ok := respCacheControl["max-age"]; ok {
-		lifetime, err = time.ParseDuration(maxAge + "s")
-		if err != nil {
-			lifetime = zeroDuration
-		}
-	} else {
-		expiresHeader := respHeaders.Get("Expires")
-		if expiresHeader != "" {
-			expires, err := time.Parse(time.RFC1123, expiresHeader)
-			if err != nil {
-				lifetime = zeroDuration
-			} else {
-				lifetime = expires.Sub(date)
-			}
-		}
-	}
-
-	if maxAge, ok := reqCacheControl["max-age"]; ok {
-		// the client is willing to accept a response whose age is no greater than the specified time in seconds
-		lifetime, err = time.ParseDuration(maxAge + "s")
-		if err != nil {
-			lifetime = zeroDuration
-		}
-	}
-	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
-		// the client wants a response that will still be fresh for at least the specified number of seconds.
-		minfreshDuration, err := time.ParseDuration(minfresh + "s")
-		if err == nil {
-			currentAge = time.Duration(currentAge + minfreshDuration)
-		}
-	}
-
-	if maxstale, ok := reqCacheControl["max-stale"]; ok {
-		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
-		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
-		// its expiration time by no more than the specified number of seconds.
-		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
-		//
-		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
-		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
-		// return-value available here.
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": struct{}{}, - "Keep-Alive": struct{}{}, - "Proxy-Authenticate": struct{}{}, - "Proxy-Authorization": struct{}{}, - "Te": struct{}{}, - "Trailers": struct{}{}, - "Transfer-Encoding": struct{}{}, - "Upgrade": struct{}{}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader, _ := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. -func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/ui/wasm/vendor/github.com/hashicorp/errwrap/LICENSE b/ui/wasm/vendor/github.com/hashicorp/errwrap/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/errwrap/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. 
“Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. 
Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. 
- -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. 
Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/ui/wasm/vendor/github.com/hashicorp/errwrap/README.md b/ui/wasm/vendor/github.com/hashicorp/errwrap/README.md deleted file mode 100644 index 444df08f8e7..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/errwrap/README.md +++ /dev/null @@ -1,89 +0,0 @@ -# errwrap - -`errwrap` is a package for Go that formalizes the pattern of wrapping errors -and checking if an error contains another error. - -There is a common pattern in Go of taking a returned `error` value and -then wrapping it (such as with `fmt.Errorf`) before returning it. The problem -with this pattern is that you completely lose the original `error` structure. - -Arguably the _correct_ approach is that you should make a custom structure -implementing the `error` interface, and have the original error as a field -on that structure, such [as this example](http://golang.org/pkg/os/#PathError). -This is a good approach, but you have to know the entire chain of possible -rewrapping that happens, when you might just care about one. - -`errwrap` formalizes this pattern (it doesn't matter what approach you use -above) by giving a single interface for wrapping errors, checking if a specific -error is wrapped, and extracting that error. - -## Installation and Docs - -Install using `go get github.com/hashicorp/errwrap`. - -Full documentation is available at -http://godoc.org/github.com/hashicorp/errwrap - -## Usage - -#### Basic Usage - -Below is a very basic example of its usage: - -```go -// A function that always returns an error, but wraps it, like a real -// function might. -func tryOpen() error { - _, err := os.Open("/i/dont/exist") - if err != nil { - return errwrap.Wrapf("Doesn't exist: {{err}}", err) - } - - return nil -} - -func main() { - err := tryOpen() - - // We can use the Contains helpers to check if an error contains - // another error. It is safe to do this with a nil error, or with - // an error that doesn't even use the errwrap package. 
- if errwrap.Contains(err, "does not exist") { - // Do something - } - if errwrap.ContainsType(err, new(os.PathError)) { - // Do something - } - - // Or we can use the associated `Get` functions to just extract - // a specific error. This would return nil if that specific error doesn't - // exist. - perr := errwrap.GetType(err, new(os.PathError)) -} -``` - -#### Custom Types - -If you're already making custom types that properly wrap errors, then -you can get all the functionality of `errwraps.Contains` and such by -implementing the `Wrapper` interface with just one function. Example: - -```go -type AppError { - Code ErrorCode - Err error -} - -func (e *AppError) WrappedErrors() []error { - return []error{e.Err} -} -``` - -Now this works: - -```go -err := &AppError{Err: fmt.Errorf("an error")} -if errwrap.ContainsType(err, fmt.Errorf("")) { - // This will work! -} -``` diff --git a/ui/wasm/vendor/github.com/hashicorp/errwrap/errwrap.go b/ui/wasm/vendor/github.com/hashicorp/errwrap/errwrap.go deleted file mode 100644 index a733bef18c0..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/errwrap/errwrap.go +++ /dev/null @@ -1,169 +0,0 @@ -// Package errwrap implements methods to formalize error wrapping in Go. -// -// All of the top-level functions that take an `error` are built to be able -// to take any error, not just wrapped errors. This allows you to use errwrap -// without having to type-check and type-cast everywhere. -package errwrap - -import ( - "errors" - "reflect" - "strings" -) - -// WalkFunc is the callback called for Walk. -type WalkFunc func(error) - -// Wrapper is an interface that can be implemented by custom types to -// have all the Contains, Get, etc. functions in errwrap work. -// -// When Walk reaches a Wrapper, it will call the callback for every -// wrapped error in addition to the wrapper itself. Since all the top-level -// functions in errwrap use Walk, this means that all those functions work -// with your custom type. -type Wrapper interface { - WrappedErrors() []error -} - -// Wrap defines that outer wraps inner, returning an error type that -// can be cleanly used with the other methods in this package, such as -// Contains, GetAll, etc. -// -// This function won't modify the error message at all (the outer message -// will be used). -func Wrap(outer, inner error) error { - return &wrappedError{ - Outer: outer, - Inner: inner, - } -} - -// Wrapf wraps an error with a formatting message. This is similar to using -// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap -// errors, you should replace it with this. -// -// format is the format of the error message. The string '{{err}}' will -// be replaced with the original error message. -func Wrapf(format string, err error) error { - outerMsg := "" - if err != nil { - outerMsg = err.Error() - } - - outer := errors.New(strings.Replace( - format, "{{err}}", outerMsg, -1)) - - return Wrap(outer, err) -} - -// Contains checks if the given error contains an error with the -// message msg. If err is not a wrapped error, this will always return -// false unless the error itself happens to match this msg. -func Contains(err error, msg string) bool { - return len(GetAll(err, msg)) > 0 -} - -// ContainsType checks if the given error contains an error with -// the same concrete type as v. If err is not a wrapped error, this will -// check the err itself. 
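Aside: pulling the upstream README fragments above into one runnable program, using only `Wrapf`, `ContainsType`, and `GetType` as defined in this deleted file (the `/i/dont/exist` path is a placeholder assumed not to exist):

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func tryOpen() error {
	_, err := os.Open("/i/dont/exist") // placeholder path, assumed absent
	if err != nil {
		return errwrap.Wrapf("Doesn't exist: {{err}}", err)
	}
	return nil
}

func main() {
	err := tryOpen()

	// ContainsType walks the wrap chain comparing concrete types.
	fmt.Println(errwrap.ContainsType(err, new(os.PathError))) // true

	// GetType extracts the deepest error of that type, or nil.
	if perr := errwrap.GetType(err, new(os.PathError)); perr != nil {
		fmt.Println("underlying:", perr)
	}
}
```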
-func ContainsType(err error, v interface{}) bool { - return len(GetAllType(err, v)) > 0 -} - -// Get is the same as GetAll but returns the deepest matching error. -func Get(err error, msg string) error { - es := GetAll(err, msg) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetType is the same as GetAllType but returns the deepest matching error. -func GetType(err error, v interface{}) error { - es := GetAllType(err, v) - if len(es) > 0 { - return es[len(es)-1] - } - - return nil -} - -// GetAll gets all the errors that might be wrapped in err with the -// given message. The order of the errors is such that the outermost -// matching error (the most recent wrap) is index zero, and so on. -func GetAll(err error, msg string) []error { - var result []error - - Walk(err, func(err error) { - if err.Error() == msg { - result = append(result, err) - } - }) - - return result -} - -// GetAllType gets all the errors that are the same type as v. -// -// The order of the return value is the same as described in GetAll. -func GetAllType(err error, v interface{}) []error { - var result []error - - var search string - if v != nil { - search = reflect.TypeOf(v).String() - } - Walk(err, func(err error) { - var needle string - if err != nil { - needle = reflect.TypeOf(err).String() - } - - if needle == search { - result = append(result, err) - } - }) - - return result -} - -// Walk walks all the wrapped errors in err and calls the callback. If -// err isn't a wrapped error, this will be called once for err. If err -// is a wrapped error, the callback will be called for both the wrapper -// that implements error as well as the wrapped error itself. -func Walk(err error, cb WalkFunc) { - if err == nil { - return - } - - switch e := err.(type) { - case *wrappedError: - cb(e.Outer) - Walk(e.Inner, cb) - case Wrapper: - cb(err) - - for _, err := range e.WrappedErrors() { - Walk(err, cb) - } - default: - cb(err) - } -} - -// wrappedError is an implementation of error that has both the -// outer and inner errors. -type wrappedError struct { - Outer error - Inner error -} - -func (w *wrappedError) Error() string { - return w.Outer.Error() -} - -func (w *wrappedError) WrappedErrors() []error { - return []error{w.Outer, w.Inner} -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/LICENSE b/ui/wasm/vendor/github.com/hashicorp/go-multierror/LICENSE deleted file mode 100644 index 82b4de97c7e..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/LICENSE +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/Makefile b/ui/wasm/vendor/github.com/hashicorp/go-multierror/Makefile deleted file mode 100644 index b97cd6ed02b..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -TEST?=./... - -default: test - -# test runs the test suite and vets the code. -test: generate - @echo "==> Running tests..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} - -# testrace runs the race checker -testrace: generate - @echo "==> Running tests (race)..." - @go list $(TEST) \ - | grep -v "/vendor/" \ - | xargs -n1 go test -timeout=60s -race ${TESTARGS} - -# updatedeps installs all the dependencies needed to run and build. -updatedeps: - @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" - -# generate runs `go generate` to build the dynamically generated source files. -generate: - @echo "==> Generating..." - @find . -type f -name '.DS_Store' -delete - @go list ./... \ - | grep -v "/vendor/" \ - | xargs -n1 go generate - -.PHONY: default test testrace updatedeps generate diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/README.md b/ui/wasm/vendor/github.com/hashicorp/go-multierror/README.md deleted file mode 100644 index 71dd308ed81..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# go-multierror - -[![CircleCI](https://img.shields.io/circleci/build/github/hashicorp/go-multierror/master)](https://circleci.com/gh/hashicorp/go-multierror) -[![Go Reference](https://pkg.go.dev/badge/github.com/hashicorp/go-multierror.svg)](https://pkg.go.dev/github.com/hashicorp/go-multierror) -![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/hashicorp/go-multierror) - -[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror -[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror - -`go-multierror` is a package for Go that provides a mechanism for -representing a list of `error` values as a single `error`. - -This allows a function in Go to return an `error` that might actually -be a list of errors. If the caller knows this, they can unwrap the -list and access the errors. If the caller doesn't know, the error -formats to a nice human-readable format. - -`go-multierror` is fully compatible with the Go standard library -[errors](https://golang.org/pkg/errors/) package, including the -functions `As`, `Is`, and `Unwrap`. 
This provides a standardized approach -for introspecting on error values. - -## Installation and Docs - -Install using `go get github.com/hashicorp/go-multierror`. - -Full documentation is available at -https://pkg.go.dev/github.com/hashicorp/go-multierror - -### Requires go version 1.13 or newer - -`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced -[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which -this library takes advantage of. - -If you need to use an earlier version of go, you can use the -[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0) -tag, which doesn't rely on features in go 1.13. - -If you see compile errors that look like the below, it's likely that -you're on an older version of go: - -``` -/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As -/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is -``` - -## Usage - -go-multierror is easy to use and purposely built to be unobtrusive in -existing Go applications/libraries that may not be aware of it. - -**Building a list of errors** - -The `Append` function is used to create a list of errors. This function -behaves a lot like the Go built-in `append` function: it doesn't matter -if the first argument is nil, a `multierror.Error`, or any other `error`, -the function behaves as you would expect. - -```go -var result error - -if err := step1(); err != nil { - result = multierror.Append(result, err) -} -if err := step2(); err != nil { - result = multierror.Append(result, err) -} - -return result -``` - -**Customizing the formatting of the errors** - -By specifying a custom `ErrorFormat`, you can customize the format -of the `Error() string` function: - -```go -var result *multierror.Error - -// ... accumulate errors here, maybe using Append - -if result != nil { - result.ErrorFormat = func([]error) string { - return "errors!" - } -} -``` - -**Accessing the list of errors** - -`multierror.Error` implements `error` so if the caller doesn't know about -multierror, it will work just fine. But if you're aware a multierror might -be returned, you can use type switches to access the list of errors: - -```go -if err := something(); err != nil { - if merr, ok := err.(*multierror.Error); ok { - // Use merr.Errors - } -} -``` - -You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap) -function. This will continue to unwrap into subsequent errors until none exist. - -**Extracting an error** - -The standard library [`errors.As`](https://golang.org/pkg/errors/#As) -function can be used directly with a multierror to extract a specific error: - -```go -// Assume err is a multierror value -err := somefunc() - -// We want to know if "err" has a "RichErrorType" in it and extract it. -var errRich RichErrorType -if errors.As(err, &errRich) { - // It has it, and now errRich is populated. -} -``` - -**Checking for an exact error value** - -Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables) -error in the `os` package. You can check if this error is present by using -the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function. 
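An aside before the upstream snippet that follows: `Append` and the stdlib `errors` helpers compose as below. This is a self-contained sketch assembled from the deleted README's own examples, not additional upstream code:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var result error
	result = multierror.Append(result, errors.New("step 1 failed"))
	result = multierror.Append(result, os.ErrNotExist)

	// The chain exposed via Unwrap cooperates with the stdlib helpers.
	fmt.Println(errors.Is(result, os.ErrNotExist)) // true

	// Default formatting: "2 errors occurred:" plus a bullet list.
	fmt.Println(result)
}
```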
- -```go -// Assume err is a multierror value -err := somefunc() -if errors.Is(err, os.ErrNotExist) { - // err contains os.ErrNotExist -} -``` - -**Returning a multierror only if there are errors** - -If you build a `multierror.Error`, you can use the `ErrorOrNil` function -to return an `error` implementation only if there are errors to return: - -```go -var result *multierror.Error - -// ... accumulate errors here - -// Return the `error` only if errors were added to the multierror, otherwise -// return nil since there are no errors. -return result.ErrorOrNil() -``` diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/append.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/append.go deleted file mode 100644 index 3e2589bfde0..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/append.go +++ /dev/null @@ -1,43 +0,0 @@ -package multierror - -// Append is a helper function that will append more errors -// onto an Error in order to create a larger multi-error. -// -// If err is not a multierror.Error, then it will be turned into -// one. If any of the errs are multierr.Error, they will be flattened -// one level into err. -// Any nil errors within errs will be ignored. If err is nil, a new -// *Error will be returned. -func Append(err error, errs ...error) *Error { - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Go through each error and flatten - for _, e := range errs { - switch e := e.(type) { - case *Error: - if e != nil { - err.Errors = append(err.Errors, e.Errors...) - } - default: - if e != nil { - err.Errors = append(err.Errors, e) - } - } - } - - return err - default: - newErrs := make([]error, 0, len(errs)+1) - if err != nil { - newErrs = append(newErrs, err) - } - newErrs = append(newErrs, errs...) - - return Append(&Error{}, newErrs...) - } -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/flatten.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/flatten.go deleted file mode 100644 index aab8e9abec9..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/flatten.go +++ /dev/null @@ -1,26 +0,0 @@ -package multierror - -// Flatten flattens the given error, merging any *Errors together into -// a single *Error. -func Flatten(err error) error { - // If it isn't an *Error, just return the error as-is - if _, ok := err.(*Error); !ok { - return err - } - - // Otherwise, make the result and flatten away! - flatErr := new(Error) - flatten(err, flatErr) - return flatErr -} - -func flatten(err error, flatErr *Error) { - switch err := err.(type) { - case *Error: - for _, e := range err.Errors { - flatten(e, flatErr) - } - default: - flatErr.Errors = append(flatErr.Errors, err) - } -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/format.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/format.go deleted file mode 100644 index 47f13c49a67..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/format.go +++ /dev/null @@ -1,27 +0,0 @@ -package multierror - -import ( - "fmt" - "strings" -) - -// ErrorFormatFunc is a function callback that is called by Error to -// turn the list of errors into a string. -type ErrorFormatFunc func([]error) string - -// ListFormatFunc is a basic formatter that outputs the number of errors -// that occurred along with a bullet point list of the errors. 
-func ListFormatFunc(es []error) string { - if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0]) - } - - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %s", err) - } - - return fmt.Sprintf( - "%d errors occurred:\n\t%s\n\n", - len(es), strings.Join(points, "\n\t")) -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/group.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/group.go deleted file mode 100644 index 9c29efb7f87..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/group.go +++ /dev/null @@ -1,38 +0,0 @@ -package multierror - -import "sync" - -// Group is a collection of goroutines which return errors that need to be -// coalesced. -type Group struct { - mutex sync.Mutex - err *Error - wg sync.WaitGroup -} - -// Go calls the given function in a new goroutine. -// -// If the function returns an error it is added to the group multierror which -// is returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.mutex.Lock() - g.err = Append(g.err, err) - g.mutex.Unlock() - } - }() -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the multierror. -func (g *Group) Wait() *Error { - g.wg.Wait() - g.mutex.Lock() - defer g.mutex.Unlock() - return g.err -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/multierror.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/multierror.go deleted file mode 100644 index f5457432646..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/multierror.go +++ /dev/null @@ -1,121 +0,0 @@ -package multierror - -import ( - "errors" - "fmt" -) - -// Error is an error type to track multiple errors. This is used to -// accumulate errors in cases and return them as a single "error". -type Error struct { - Errors []error - ErrorFormat ErrorFormatFunc -} - -func (e *Error) Error() string { - fn := e.ErrorFormat - if fn == nil { - fn = ListFormatFunc - } - - return fn(e.Errors) -} - -// ErrorOrNil returns an error interface if this Error represents -// a list of errors, or returns nil if the list of errors is empty. This -// function is useful at the end of accumulation to make sure that the value -// returned represents the existence of errors. -func (e *Error) ErrorOrNil() error { - if e == nil { - return nil - } - if len(e.Errors) == 0 { - return nil - } - - return e -} - -func (e *Error) GoString() string { - return fmt.Sprintf("*%#v", *e) -} - -// WrappedErrors returns the list of errors that this Error is wrapping. It is -// an implementation of the errwrap.Wrapper interface so that multierror.Error -// can be used with that library. -// -// This method is not safe to be called concurrently. Unlike accessing the -// Errors field directly, this function also checks if the multierror is nil to -// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface. -func (e *Error) WrappedErrors() []error { - if e == nil { - return nil - } - return e.Errors -} - -// Unwrap returns an error from Error (or nil if there are no errors). -// This error returned will further support Unwrap to get the next error, -// etc. The order will match the order of Errors in the multierror.Error -// at the time of calling. -// -// The resulting error supports errors.As/Is/Unwrap so you can continue -// to use the stdlib errors package to introspect further. 
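Stepping back to the `Group` type from the deleted `group.go` above: it coalesces errors from concurrent goroutines behind a `sync.WaitGroup`. A short hedged sketch of its intended use, based only on the `Go`/`Wait` methods shown in that file:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	var g multierror.Group

	for i := 0; i < 3; i++ {
		i := i // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			if i%2 == 0 {
				return fmt.Errorf("worker %d failed", i)
			}
			return nil
		})
	}

	// Wait blocks for all goroutines, then returns the coalesced *Error;
	// ErrorOrNil converts an empty result into a plain nil.
	if err := g.Wait().ErrorOrNil(); err != nil {
		fmt.Println(err) // "2 errors occurred: ..."
	}
}
```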
-// -// This will perform a shallow copy of the errors slice. Any errors appended -// to this error after calling Unwrap will not be available until a new -// Unwrap is called on the multierror.Error. -func (e *Error) Unwrap() error { - // If we have no errors then we do nothing - if e == nil || len(e.Errors) == 0 { - return nil - } - - // If we have exactly one error, we can just return that directly. - if len(e.Errors) == 1 { - return e.Errors[0] - } - - // Shallow copy the slice - errs := make([]error, len(e.Errors)) - copy(errs, e.Errors) - return chain(errs) -} - -// chain implements the interfaces necessary for errors.Is/As/Unwrap to -// work in a deterministic way with multierror. A chain tracks a list of -// errors while accounting for the current represented error. This lets -// Is/As be meaningful. -// -// Unwrap returns the next error. In the cleanest form, Unwrap would return -// the wrapped error here but we can't do that if we want to properly -// get access to all the errors. Instead, users are recommended to use -// Is/As to get the correct error type out. -// -// Precondition: []error is non-empty (len > 0) -type chain []error - -// Error implements the error interface -func (e chain) Error() string { - return e[0].Error() -} - -// Unwrap implements errors.Unwrap by returning the next error in the -// chain or nil if there are no more errors. -func (e chain) Unwrap() error { - if len(e) == 1 { - return nil - } - - return e[1:] -} - -// As implements errors.As by attempting to map to the current value. -func (e chain) As(target interface{}) bool { - return errors.As(e[0], target) -} - -// Is implements errors.Is by comparing the current value directly. -func (e chain) Is(target error) bool { - return errors.Is(e[0], target) -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/prefix.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/prefix.go deleted file mode 100644 index 5c477abe44f..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/prefix.go +++ /dev/null @@ -1,37 +0,0 @@ -package multierror - -import ( - "fmt" - - "github.com/hashicorp/errwrap" -) - -// Prefix is a helper function that will prefix some text -// to the given error. If the error is a multierror.Error, then -// it will be prefixed to each wrapped error. -// -// This is useful to use when appending multiple multierrors -// together in order to give better scoping. 
-func Prefix(err error, prefix string) error { - if err == nil { - return nil - } - - format := fmt.Sprintf("%s {{err}}", prefix) - switch err := err.(type) { - case *Error: - // Typed nils can reach here, so initialize if we are nil - if err == nil { - err = new(Error) - } - - // Wrap each of the errors - for i, e := range err.Errors { - err.Errors[i] = errwrap.Wrapf(format, e) - } - - return err - default: - return errwrap.Wrapf(format, err) - } -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-multierror/sort.go b/ui/wasm/vendor/github.com/hashicorp/go-multierror/sort.go deleted file mode 100644 index fecb14e81c5..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-multierror/sort.go +++ /dev/null @@ -1,16 +0,0 @@ -package multierror - -// Len implements sort.Interface function for length -func (err Error) Len() int { - return len(err.Errors) -} - -// Swap implements sort.Interface function for swapping elements -func (err Error) Swap(i, j int) { - err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i] -} - -// Less implements sort.Interface function for determining order -func (err Error) Less(i, j int) bool { - return err.Errors[i].Error() < err.Errors[j].Error() -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/ui/wasm/vendor/github.com/hashicorp/go-version/CHANGELOG.md deleted file mode 100644 index 2020c472743..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/CHANGELOG.md +++ /dev/null @@ -1,32 +0,0 @@ -# 1.4.0 (January 5, 2021) - -FEATURES: - - - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) - - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) - -# 1.3.0 (March 31, 2021) - -Please note that CHANGELOG.md does not exist in the source code prior to this release. - -FEATURES: - - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) - -# 1.2.1 (June 17, 2020) - -BUG FIXES: - - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) - -# 1.2.0 (April 23, 2019) - -FEATURES: - - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) - -# 1.1.0 (Jan 07, 2019) - -FEATURES: - - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) - -# 1.0.0 (August 24, 2018) - -Initial release. diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/LICENSE b/ui/wasm/vendor/github.com/hashicorp/go-version/LICENSE deleted file mode 100644 index c33dcc7c928..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. 
“Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. 
for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/README.md b/ui/wasm/vendor/github.com/hashicorp/go-version/README.md deleted file mode 100644 index 851a337beb4..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/README.md +++ /dev/null @@ -1,66 +0,0 @@ -# Versioning Library for Go -[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/master) -[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) - -go-version is a library for parsing versions and version constraints, -and verifying versions against a set of constraints. go-version -can sort a collection of versions properly, handles prerelease/beta -versions, can increment versions, etc. - -Versions used with go-version must follow [SemVer](http://semver.org/). - -## Installation and Usage - -Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). - -Installation can be done with a normal `go get`: - -``` -$ go get github.com/hashicorp/go-version -``` - -#### Version Parsing and Comparison - -```go -v1, err := version.NewVersion("1.2") -v2, err := version.NewVersion("1.5+metadata") - -// Comparison example. There is also GreaterThan, Equal, and just -// a simple Compare that returns an int allowing easy >=, <=, etc. -if v1.LessThan(v2) { - fmt.Printf("%s is less than %s", v1, v2) -} -``` - -#### Version Constraints - -```go -v1, err := version.NewVersion("1.2") - -// Constraints example. -constraints, err := version.NewConstraint(">= 1.0, < 1.4") -if constraints.Check(v1) { - fmt.Printf("%s satisfies constraints %s", v1, constraints) -} -``` - -#### Version Sorting - -```go -versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} -versions := make([]*version.Version, len(versionsRaw)) -for i, raw := range versionsRaw { - v, _ := version.NewVersion(raw) - versions[i] = v -} - -// After this, the versions are properly sorted -sort.Sort(version.Collection(versions)) -``` - -## Issues and Contributing - -If you find an issue with this library, please report an issue. If you'd -like, we welcome any contributions. Fork this library and submit a pull -request. 
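One operator the examples above do not cover is the pessimistic constraint `~>`, implemented in constraint.go below; a hedged sketch of its semantics, assuming the usual imports:

```go
c, _ := version.NewConstraint("~> 1.2")
v1, _ := version.NewVersion("1.3.5")
v2, _ := version.NewVersion("2.0.0")

fmt.Println(c.Check(v1)) // true: same leading segment and 1.3.5 >= 1.2
fmt.Println(c.Check(v2)) // false: 2.x falls outside ~> 1.2
```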
diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/constraint.go b/ui/wasm/vendor/github.com/hashicorp/go-version/constraint.go deleted file mode 100644 index 1d880902810..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/constraint.go +++ /dev/null @@ -1,290 +0,0 @@ -package version - -import ( - "fmt" - "reflect" - "regexp" - "sort" - "strings" -) - -// Constraint represents a single constraint for a version, such as -// ">= 1.0". -type Constraint struct { - f constraintFunc - op operator - check *Version - original string -} - -func (c *Constraint) Equals(con *Constraint) bool { - return c.op == con.op && c.check.Equal(con.check) -} - -// Constraints is a slice of constraints. We make a custom type so that -// we can add methods to it. -type Constraints []*Constraint - -type constraintFunc func(v, c *Version) bool - -var constraintOperators map[string]constraintOperation - -type constraintOperation struct { - op operator - f constraintFunc -} - -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintOperation{ - "": {op: equal, f: constraintEqual}, - "=": {op: equal, f: constraintEqual}, - "!=": {op: notEqual, f: constraintNotEqual}, - ">": {op: greaterThan, f: constraintGreaterThan}, - "<": {op: lessThan, f: constraintLessThan}, - ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, - "<=": {op: lessThanEqual, f: constraintLessThanEqual}, - "~>": {op: pessimistic, f: constraintPessimistic}, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - -// NewConstraint will parse one or more constraints from the given -// constraint string. The string must be a comma-separated list of -// constraints. -func NewConstraint(v string) (Constraints, error) { - vs := strings.Split(v, ",") - result := make([]*Constraint, len(vs)) - for i, single := range vs { - c, err := parseSingle(single) - if err != nil { - return nil, err - } - - result[i] = c - } - - return Constraints(result), nil -} - -// MustConstraints is a helper that wraps a call to a function -// returning (Constraints, error) and panics if error is non-nil. -func MustConstraints(c Constraints, err error) Constraints { - if err != nil { - panic(err) - } - - return c -} - -// Check tests if a version satisfies all the constraints. -func (cs Constraints) Check(v *Version) bool { - for _, c := range cs { - if !c.Check(v) { - return false - } - } - - return true -} - -// Equals compares Constraints with other Constraints -// for equality. This may not represent logical equivalence -// of compared constraints. -// e.g. even though '>0.1,>0.2' is logically equivalent -// to '>0.2' it is *NOT* treated as equal. -// -// Missing operator is treated as equal to '=', whitespaces -// are ignored and constraints are sorted before comaparison. 
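// Illustration (not vendored code): with the semantics above,
//
//	a := version.MustConstraints(version.NewConstraint(">= 1.0, < 2.0"))
//	b := version.MustConstraints(version.NewConstraint("<2.0,  >=1.0"))
//	a.Equals(b) // true: ordering and whitespace differences are normalized
//
// whereas ">0.2" and ">0.1, >0.2" compare unequal because their lengths differ.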
-func (cs Constraints) Equals(c Constraints) bool { - if len(cs) != len(c) { - return false - } - - // make copies to retain order of the original slices - left := make(Constraints, len(cs)) - copy(left, cs) - sort.Stable(left) - right := make(Constraints, len(c)) - copy(right, c) - sort.Stable(right) - - // compare sorted slices - for i, con := range left { - if !con.Equals(right[i]) { - return false - } - } - - return true -} - -func (cs Constraints) Len() int { - return len(cs) -} - -func (cs Constraints) Less(i, j int) bool { - if cs[i].op < cs[j].op { - return true - } - if cs[i].op > cs[j].op { - return false - } - - return cs[i].check.LessThan(cs[j].check) -} - -func (cs Constraints) Swap(i, j int) { - cs[i], cs[j] = cs[j], cs[i] -} - -// Returns the string format of the constraints -func (cs Constraints) String() string { - csStr := make([]string, len(cs)) - for i, c := range cs { - csStr[i] = c.String() - } - - return strings.Join(csStr, ",") -} - -// Check tests if a constraint is validated by the given version. -func (c *Constraint) Check(v *Version) bool { - return c.f(v, c.check) -} - -func (c *Constraint) String() string { - return c.original -} - -func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) - } - - check, err := NewVersion(matches[2]) - if err != nil { - return nil, err - } - - cop := constraintOperators[matches[1]] - - return &Constraint{ - f: cop.f, - op: cop.op, - check: check, - original: v, - }, nil -} - -func prereleaseCheck(v, c *Version) bool { - switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { - case cPre && vPre: - // A constraint with a pre-release can only match a pre-release version - // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) - - case !cPre && vPre: - // A constraint without a pre-release can only match a version without a - // pre-release. 
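// Illustration (not vendored code): under these rules ">= 1.2.3-alpha"
// can match "1.2.3-beta" (same 1.2.3 base) but never "1.4.0-beta",
// while a plain ">= 1.2.3" never matches the pre-release "1.2.3-rc.1".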
- return false - - case cPre && !vPre: - // OK, except with the pessimistic operator - case !cPre && !vPre: - // OK - } - return true -} - -//------------------------------------------------------------------- -// Constraint functions -//------------------------------------------------------------------- - -type operator rune - -const ( - equal operator = '=' - notEqual operator = '≠' - greaterThan operator = '>' - lessThan operator = '<' - greaterThanEqual operator = '≥' - lessThanEqual operator = '≤' - pessimistic operator = '~' -) - -func constraintEqual(v, c *Version) bool { - return v.Equal(c) -} - -func constraintNotEqual(v, c *Version) bool { - return !v.Equal(c) -} - -func constraintGreaterThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == 1 -} - -func constraintLessThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == -1 -} - -func constraintGreaterThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) >= 0 -} - -func constraintLessThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) <= 0 -} - -func constraintPessimistic(v, c *Version) bool { - // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases - if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { - return false - } - - // If the version being checked is naturally less than the constraint, then there - // is no way for the version to be valid against the constraint - if v.LessThan(c) { - return false - } - // We'll use this more than once, so grab the length now so it's a little cleaner - // to write the later checks - cs := len(c.segments) - - // If the version being checked has less specificity than the constraint, then there - // is no way for the version to be valid against the constraint - if cs > len(v.segments) { - return false - } - - // Check the segments in the constraint against those in the version. If the version - // being checked, at any point, does not have the same values in each index of the - // constraints segments, then it cannot be valid against the constraint. - for i := 0; i < c.si-1; i++ { - if v.segments[i] != c.segments[i] { - return false - } - } - - // Check the last part of the segment in the constraint. If the version segment at - // this index is less than the constraints segment at this index, then it cannot - // be valid against the constraint - if c.segments[cs-1] > v.segments[cs-1] { - return false - } - - // If nothing has rejected the version by now, it's valid - return true -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/version.go b/ui/wasm/vendor/github.com/hashicorp/go-version/version.go deleted file mode 100644 index 116a74466db..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/version.go +++ /dev/null @@ -1,390 +0,0 @@ -package version - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// The compiled regular expression used to test the validity of a version. -var ( - versionRegexp *regexp.Regexp - semverRegexp *regexp.Regexp -) - -// The raw regular expression string used for testing the validity -// of a version. 
-const ( - VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` - - // SemverRegexpRaw requires a separator between version and prerelease - SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` -) - -// Version represents a single version. -type Version struct { - metadata string - pre string - segments []int64 - si int - original string -} - -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") - semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") -} - -// NewVersion parses the given version and returns a new -// Version. -func NewVersion(v string) (*Version, error) { - return newVersion(v, versionRegexp) -} - -// NewSemver parses the given version and returns a new -// Version that adheres strictly to SemVer specs -// https://semver.org/ -func NewSemver(v string) (*Version, error) { - return newVersion(v, semverRegexp) -} - -func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { - matches := pattern.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) - } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int64, len(segmentsStr)) - for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version: %s", err) - } - - segments[i] = val - } - - // Even though we could support more than three segments, if we - // got less than three, pad it with 0s. This is to cover the basic - // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum - for i := len(segments); i < 3; i++ { - segments = append(segments, 0) - } - - pre := matches[7] - if pre == "" { - pre = matches[4] - } - - return &Version{ - metadata: matches[10], - pre: pre, - segments: segments, - si: len(segmentsStr), - original: v, - }, nil -} - -// Must is a helper that wraps a call to a function returning (*Version, error) -// and panics if error is non-nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - - return v -} - -// Compare compares this version to another version. This -// returns -1, 0, or 1 if this version is smaller, equal, -// or larger than the other version, respectively. -// -// If you want boolean results, use the LessThan, Equal, -// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
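// Illustration (not vendored code): pre-releases sort before their release,
// so given
//
//	a, _ := version.NewVersion("1.2.3-beta.1")
//	b, _ := version.NewVersion("1.2.3")
//
// a.Compare(b) returns -1 and b.GreaterThan(a) returns true.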
-func (v *Version) Compare(other *Version) int { - // A quick, efficient equality check - if v.String() == other.String() { - return 0 - } - - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { - preSelf := v.Prerelease() - preOther := other.Prerelease() - if preSelf == "" && preOther == "" { - return 0 - } - if preSelf == "" { - return 1 - } - if preOther == "" { - return -1 - } - - return comparePrereleases(preSelf, preOther) - } - - // Get the highest specificity (hS), or if they're equal, just use segmentSelf length - lenSelf := len(segmentsSelf) - lenOther := len(segmentsOther) - hS := lenSelf - if lenSelf < lenOther { - hS = lenOther - } - // Compare the segments - // Because a constraint could have more/less specificity than the version it's - // checking, we need to account for a lopsided or jagged comparison - for i := 0; i < hS; i++ { - if i > lenSelf-1 { - // This means Self had the lower specificity - // Check to see if the remaining segments in Other are all zeros - if !allZero(segmentsOther[i:]) { - // if not, it means that Other has to be greater than Self - return -1 - } - break - } else if i > lenOther-1 { - // this means Other had the lower specificity - // Check to see if the remaining segments in Self are all zeros - - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other - return 1 - } - break - } - lhs := segmentsSelf[i] - rhs := segmentsOther[i] - if lhs == rhs { - continue - } else if lhs < rhs { - return -1 - } - // Otherwis, rhs was > lhs, they're not equal - return 1 - } - - // if we got this far, they're equal - return 0 -} - -func allZero(segs []int64) bool { - for _, s := range segs { - if s != 0 { - return false - } - } - return true -} - -func comparePart(preSelf string, preOther string) int { - if preSelf == preOther { - return 0 - } - - var selfInt int64 - selfNumeric := true - selfInt, err := strconv.ParseInt(preSelf, 10, 64) - if err != nil { - selfNumeric = false - } - - var otherInt int64 - otherNumeric := true - otherInt, err = strconv.ParseInt(preOther, 10, 64) - if err != nil { - otherNumeric = false - } - - // if a part is empty, we use the other to decide - if preSelf == "" { - if otherNumeric { - return -1 - } - return 1 - } - - if preOther == "" { - if selfNumeric { - return 1 - } - return -1 - } - - if selfNumeric && !otherNumeric { - return -1 - } else if !selfNumeric && otherNumeric { - return 1 - } else if !selfNumeric && !otherNumeric && preSelf > preOther { - return 1 - } else if selfInt > otherInt { - return 1 - } - - return -1 -} - -func comparePrereleases(v string, other string) int { - // the same pre release! 
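// Illustration (not vendored code): "alpha.2" < "alpha.10" because numeric
// parts compare as integers, and "2" < "alpha" because a numeric part always
// sorts before an alphanumeric one (see comparePart above).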
- if v == other { - return 0 - } - - // split both pre releases for analyse their parts - selfPreReleaseMeta := strings.Split(v, ".") - otherPreReleaseMeta := strings.Split(other, ".") - - selfPreReleaseLen := len(selfPreReleaseMeta) - otherPreReleaseLen := len(otherPreReleaseMeta) - - biggestLen := otherPreReleaseLen - if selfPreReleaseLen > otherPreReleaseLen { - biggestLen = selfPreReleaseLen - } - - // loop for parts to find the first difference - for i := 0; i < biggestLen; i = i + 1 { - partSelfPre := "" - if i < selfPreReleaseLen { - partSelfPre = selfPreReleaseMeta[i] - } - - partOtherPre := "" - if i < otherPreReleaseLen { - partOtherPre = otherPreReleaseMeta[i] - } - - compare := comparePart(partSelfPre, partOtherPre) - // if parts are equals, continue the loop - if compare != 0 { - return compare - } - } - - return 0 -} - -// Core returns a new version constructed from only the MAJOR.MINOR.PATCH -// segments of the version, without prerelease or metadata. -func (v *Version) Core() *Version { - segments := v.Segments64() - segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) - return Must(NewVersion(segmentsOnly)) -} - -// Equal tests if two versions are equal. -func (v *Version) Equal(o *Version) bool { - if v == nil || o == nil { - return v == o - } - - return v.Compare(o) == 0 -} - -// GreaterThan tests if this version is greater than another version. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// GreaterThanOrEqual tests if this version is greater than or equal to another version. -func (v *Version) GreaterThanOrEqual(o *Version) bool { - return v.Compare(o) >= 0 -} - -// LessThan tests if this version is less than another version. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// LessThanOrEqual tests if this version is less than or equal to another version. -func (v *Version) LessThanOrEqual(o *Version) bool { - return v.Compare(o) <= 0 -} - -// Metadata returns any metadata that was part of the version -// string. -// -// Metadata is anything that comes after the "+" in the version. -// For example, with "1.2.3+beta", the metadata is "beta". -func (v *Version) Metadata() string { - return v.metadata -} - -// Prerelease returns any prerelease data that is part of the version, -// or blank if there is no prerelease data. -// -// Prerelease information is anything that comes after the "-" in the -// version (but before any metadata). For example, with "1.2.3-beta", -// the prerelease information is "beta". -func (v *Version) Prerelease() string { - return v.pre -} - -// Segments returns the numeric segments of the version as a slice of ints. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments() []int { - segmentSlice := make([]int, len(v.segments)) - for i, v := range v.segments { - segmentSlice[i] = int(v) - } - return segmentSlice -} - -// Segments64 returns the numeric segments of the version as a slice of int64s. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments64() []int64 { - result := make([]int64, len(v.segments)) - copy(result, v.segments) - return result -} - -// String returns the full version string included pre-release -// and metadata information. 
-// -// This value is rebuilt according to the parsed segments and other -// information. Therefore, ambiguities in the version string such as -// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and -// missing parts (1.0 => 1.0.0) will be made into a canonicalized form -// as shown in the parenthesized examples. -func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) - for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str - } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original parsed version as-is, including any -// potential whitespace, `v` prefix, etc. -func (v *Version) Original() string { - return v.original -} diff --git a/ui/wasm/vendor/github.com/hashicorp/go-version/version_collection.go b/ui/wasm/vendor/github.com/hashicorp/go-version/version_collection.go deleted file mode 100644 index cc888d43e6b..00000000000 --- a/ui/wasm/vendor/github.com/hashicorp/go-version/version_collection.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -// Collection is a type that implements the sort.Interface interface -// so that versions can be sorted. -type Collection []*Version - -func (v Collection) Len() int { - return len(v) -} - -func (v Collection) Less(i, j int) bool { - return v[i].LessThan(v[j]) -} - -func (v Collection) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/ui/wasm/vendor/github.com/imdario/mergo/.deepsource.toml b/ui/wasm/vendor/github.com/imdario/mergo/.deepsource.toml deleted file mode 100644 index 8a0681af855..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/.deepsource.toml +++ /dev/null @@ -1,12 +0,0 @@ -version = 1 - -test_patterns = [ - "*_test.go" -] - -[[analyzers]] -name = "go" -enabled = true - - [analyzers.meta] - import_path = "github.com/imdario/mergo" \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/imdario/mergo/.gitignore b/ui/wasm/vendor/github.com/imdario/mergo/.gitignore deleted file mode 100644 index 529c3412ba9..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/.gitignore +++ /dev/null @@ -1,33 +0,0 @@ -#### joe made this: http://goel.io/joe - -#### go #### -# Binaries for programs and plugins -*.exe -*.dll -*.so -*.dylib - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 -.glide/ - -#### vim #### -# Swap -[._]*.s[a-v][a-z] -[._]*.sw[a-p] -[._]s[a-v][a-z] -[._]sw[a-p] - -# Session -Session.vim - -# Temporary -.netrwhist -*~ -# Auto-generated tag files -tags diff --git a/ui/wasm/vendor/github.com/imdario/mergo/.travis.yml b/ui/wasm/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index d324c43ba4d..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -arch: - - amd64 - - ppc64le -install: - - go get -t - - go get golang.org/x/tools/cmd/cover - - go get github.com/mattn/goveralls -script: - - go test -race -v ./... 
-after_script: - - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/ui/wasm/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/ui/wasm/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md deleted file mode 100644 index 469b44907a0..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/ui/wasm/vendor/github.com/imdario/mergo/LICENSE b/ui/wasm/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298da..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/ui/wasm/vendor/github.com/imdario/mergo/README.md b/ui/wasm/vendor/github.com/imdario/mergo/README.md deleted file mode 100644 index aa8cbd7ce6d..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/README.md +++ /dev/null @@ -1,247 +0,0 @@ -# Mergo - - -[![GoDoc][3]][4] -[![GitHub release][5]][6] -[![GoCard][7]][8] -[![Build Status][1]][2] -[![Coverage Status][9]][10] -[![Sourcegraph][11]][12] -[![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] - -[1]: https://travis-ci.org/imdario/mergo.png -[2]: https://travis-ci.org/imdario/mergo -[3]: https://godoc.org/github.com/imdario/mergo?status.svg -[4]: https://godoc.org/github.com/imdario/mergo -[5]: https://img.shields.io/github/release/imdario/mergo.svg -[6]: https://github.com/imdario/mergo/releases -[7]: https://goreportcard.com/badge/imdario/mergo -[8]: https://goreportcard.com/report/github.com/imdario/mergo -[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[10]: https://coveralls.io/github/imdario/mergo?branch=master -[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[12]: https://sourcegraph.com/github.com/imdario/mergo?badge -[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield -[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo - -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). - -### Important note - -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. - -Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -### Donations - -If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: - -Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) -Donate using Liberapay - -### Mergo in the wild - -- [moby/moby](https://github.com/moby/moby) -- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) -- [vmware/dispatch](https://github.com/vmware/dispatch) -- [Shopify/themekit](https://github.com/Shopify/themekit) -- [imdario/zas](https://github.com/imdario/zas) -- [matcornic/hermes](https://github.com/matcornic/hermes) -- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) -- [kataras/iris](https://github.com/kataras/iris) -- [michaelsauter/crane](https://github.com/michaelsauter/crane) -- [go-task/task](https://github.com/go-task/task) -- [sensu/uchiwa](https://github.com/sensu/uchiwa) -- [ory/hydra](https://github.com/ory/hydra) -- [sisatech/vcli](https://github.com/sisatech/vcli) -- [dairycart/dairycart](https://github.com/dairycart/dairycart) -- [projectcalico/felix](https://github.com/projectcalico/felix) -- [resin-os/balena](https://github.com/resin-os/balena) -- [go-kivik/kivik](https://github.com/go-kivik/kivik) -- [Telefonica/govice](https://github.com/Telefonica/govice) -- [supergiant/supergiant](supergiant/supergiant) -- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) -- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) -- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) -- [EagerIO/Stout](https://github.com/EagerIO/Stout) -- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) -- [russross/canvasassignments](https://github.com/russross/canvasassignments) -- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) -- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) -- [divshot/gitling](https://github.com/divshot/gitling) -- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) -- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) -- [elwinar/rambler](https://github.com/elwinar/rambler) -- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) -- [jfbus/impressionist](https://github.com/jfbus/impressionist) -- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) -- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) -- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) -- [thoas/picfit](https://github.com/thoas/picfit) -- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) -- [jnuthong/item_search](https://github.com/jnuthong/item_search) -- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) -- [containerssh/containerssh](https://github.com/containerssh/containerssh) - -## Install - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -## Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - -```go -if err := mergo.Merge(&dst, src); err != nil { - // ... 
-} -``` - -Also, you can merge overwriting values using the transformer `WithOverride`. - -```go -if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... -} -``` - -Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. - -```go -if err := mergo.Map(&dst, srcMap); err != nil { - // ... -} -``` - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. - -Here is a nice example: - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" -) - -type Foo struct { - A string - B int64 -} - -func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} -} -``` - -Note: if test are failing due missing package, please execute: - - go get gopkg.in/yaml.v2 - -### Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? - -```go -package main - -import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" -) - -type timeTransformer struct { -} - -func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil -} - -type Snapshot struct { - Time time.Time - // ... -} - -func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } -} -``` - - -## Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) - -## About - -Written by [Dario Castañé](http://dario.im). 
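Returning to the `Map()` capitalization rule from the Usage section above, here is a hedged sketch; the `Config` type and its keys are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Config is a hypothetical destination struct for illustration.
type Config struct {
	Name string
	Port int64
}

func main() {
	src := map[string]interface{}{"name": "api", "port": int64(8080)}
	dst := Config{}
	// Map capitalizes each key ("name" -> "Name") to find the target field.
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst) // {api 8080}
}
```

Note the explicit `int64(8080)`: the map value's dynamic type must match the field type, since `Map` reports a type mismatch rather than converting between numeric kinds.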
- -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - -## License - -[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). - - -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/ui/wasm/vendor/github.com/imdario/mergo/doc.go b/ui/wasm/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index fcd985f995d..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. - -Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Status - -It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc. - -Important note - -Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. . Also, this version adds suppot for go modules. - -Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code. - -If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0). - -Install - -Do your usual installation procedure: - - go get github.com/imdario/mergo - - // use in your .go code - import ( - "github.com/imdario/mergo" - ) - -Usage - -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as they are zero values too. 
Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). - - if err := mergo.Merge(&dst, src); err != nil { - // ... - } - -Also, you can merge overwriting values using the transformer WithOverride. - - if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { - // ... - } - -Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field. - - if err := mergo.Map(&dst, srcMap); err != nil { - // ... - } - -Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values. - -Here is a nice example: - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - ) - - type Foo struct { - A string - B int64 - } - - func main() { - src := Foo{ - A: "one", - B: 2, - } - dest := Foo{ - A: "two", - } - mergo.Merge(&dest, src) - fmt.Println(dest) - // Will print - // {two 2} - } - -Transformers - -Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time? - - package main - - import ( - "fmt" - "github.com/imdario/mergo" - "reflect" - "time" - ) - - type timeTransformer struct { - } - - func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { - if typ == reflect.TypeOf(time.Time{}) { - return func(dst, src reflect.Value) error { - if dst.CanSet() { - isZero := dst.MethodByName("IsZero") - result := isZero.Call([]reflect.Value{}) - if result[0].Bool() { - dst.Set(src) - } - } - return nil - } - } - return nil - } - - type Snapshot struct { - Time time.Time - // ... - } - - func main() { - src := Snapshot{time.Now()} - dest := Snapshot{} - mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{})) - fmt.Println(dest) - // Will print - // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } - } - -Contact me - -If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario - -About - -Written by Dario Castañé: https://da.rio.hn - -License - -BSD 3-Clause license, as Go language. - -*/ -package mergo diff --git a/ui/wasm/vendor/github.com/imdario/mergo/map.go b/ui/wasm/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index a13a7ee46c7..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - config.overwriteWithEmptyValue = true - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. - if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. 
-// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/ui/wasm/vendor/github.com/imdario/mergo/merge.go b/ui/wasm/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index 8c2a8fcd901..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" -) - -func hasMergeableFields(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasMergeableFields(dst.Field(i)) - } else if isExportedComponent(&field) { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -func isExportedComponent(field *reflect.StructField) bool { - pkgPath := field.PkgPath - if len(pkgPath) > 0 { - return false - } - c := field.Name[0] - if 'a' <= c && c <= 'z' || c == '_' { - return false - } - return true -} - -type Config struct { - Overwrite bool - AppendSlice bool - TypeCheck bool - Transformers Transformers - overwriteWithEmptyValue bool - overwriteSliceWithEmptyValue bool - sliceDeepCopy bool - debug bool -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. 
-func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - typeCheck := config.TypeCheck - overwriteWithEmptySrc := config.overwriteWithEmptyValue - overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue - sliceDeepCopy := config.sliceDeepCopy - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasMergeableFields(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - if dst.CanSet() { - dst.Set(reflect.MakeMap(dst.Type())) - } else { - dst = src - return - } - } - - if src.Kind() != reflect.Map { - if overwrite { - dst.Set(src) - } - return - } - - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - if overwrite { - dst.SetMapIndex(key, srcElement) - } - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - srcMapElm := srcElement - dstMapElm := dstElement - if srcMapElm.CanInterface() { - srcMapElm = reflect.ValueOf(srcMapElm.Interface()) - if dstMapElm.IsValid() { - dstMapElm = reflect.ValueOf(dstMapElm.Interface()) - } - } - if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - if typeCheck && srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = srcSlice - } else if config.AppendSlice { - if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) - } - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - } else if sliceDeepCopy { - i := 0 - for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { - srcElement := srcSlice.Index(i) - dstElement := dstSlice.Index(i) - - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = 
reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - - } - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { - continue - } - - if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { - dst.Set(src) - } else if config.AppendSlice { - if src.Type() != dst.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) - } - dst.Set(reflect.AppendSlice(dst, src)) - } else if sliceDeepCopy { - for i := 0; i < src.Len() && i < dst.Len(); i++ { - srcElement := src.Index(i) - dstElement := dst.Index(i) - if srcElement.CanInterface() { - srcElement = reflect.ValueOf(srcElement.Interface()) - } - if dstElement.CanInterface() { - dstElement = reflect.ValueOf(dstElement.Interface()) - } - - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if isReflectNil(src) { - if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - break - } - - if src.Kind() != reflect.Interface { - if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - break - } - - if dst.Elem().Kind() == src.Elem().Kind() { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - break - } - default: - mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) - if mustSet { - if dst.CanSet() { - dst.Set(src) - } else { - dst = src - } - } - } - - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) 
-} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. -func WithOverwriteWithEmptyValue(config *Config) { - config.Overwrite = true - config.overwriteWithEmptyValue = true -} - -// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. -func WithOverrideEmptySlice(config *Config) { - config.overwriteSliceWithEmptyValue = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it. -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). -func WithTypeCheck(config *Config) { - config.TypeCheck = true -} - -// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. -func WithSliceDeepCopy(config *Config) { - config.sliceDeepCopy = true - config.Overwrite = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { - return ErrNonPointerAgument - } - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} - -// IsReflectNil is the reflect value provided nil -func isReflectNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: - // Both interface and slice are nil if first word is 0. - // Both are always bigger than a word; assume flagIndir. - return v.IsNil() - default: - return false - } -} diff --git a/ui/wasm/vendor/github.com/imdario/mergo/mergo.go b/ui/wasm/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 3cc926c7f62..00000000000 --- a/ui/wasm/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") - ErrNonPointerAgument = errors.New("dst must be a pointer") -) - -// During deepMerge, must keep track of checks that are -// in progress. The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. 
-// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - if v.IsNil() { - return true - } - return isEmptyValue(v.Elem()) - case reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} diff --git a/ui/wasm/vendor/github.com/jbenet/go-context/LICENSE b/ui/wasm/vendor/github.com/jbenet/go-context/LICENSE deleted file mode 100644 index c7386b3c940..00000000000 --- a/ui/wasm/vendor/github.com/jbenet/go-context/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Juan Batiz-Benet - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/ui/wasm/vendor/github.com/jbenet/go-context/io/ctxio.go b/ui/wasm/vendor/github.com/jbenet/go-context/io/ctxio.go deleted file mode 100644 index b4f2454235a..00000000000 --- a/ui/wasm/vendor/github.com/jbenet/go-context/io/ctxio.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package ctxio provides io.Reader and io.Writer wrappers that -// respect context.Contexts. Use these at the interface between -// your context code and your io. -// -// WARNING: read the code. see how writes and reads will continue -// until you cancel the io. Maybe this package should provide -// versions of io.ReadCloser and io.WriteCloser that automatically -// call .Close when the context expires. But for now -- since in my -// use cases I have long-lived connections with ephemeral io wrappers -// -- this has yet to be a need. 
-package ctxio - -import ( - "io" - - context "golang.org/x/net/context" -) - -type ioret struct { - n int - err error -} - -type Writer interface { - io.Writer -} - -type ctxWriter struct { - w io.Writer - ctx context.Context -} - -// NewWriter wraps a writer to make it respect given Context. -// If there is a blocking write, the returned Writer will return -// whenever the context is cancelled (the return values are n=0 -// and err=ctx.Err().) -// -// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying -// write-- there is no way to do that with the standard go io -// interface. So the read and write _will_ happen or hang. So, use -// this sparingly, make sure to cancel the read or write as necesary -// (e.g. closing a connection whose context is up, etc.) -// -// Furthermore, in order to protect your memory from being read -// _after_ you've cancelled the context, this io.Writer will -// first make a **copy** of the buffer. -func NewWriter(ctx context.Context, w io.Writer) *ctxWriter { - if ctx == nil { - ctx = context.Background() - } - return &ctxWriter{ctx: ctx, w: w} -} - -func (w *ctxWriter) Write(buf []byte) (int, error) { - buf2 := make([]byte, len(buf)) - copy(buf2, buf) - - c := make(chan ioret, 1) - - go func() { - n, err := w.w.Write(buf2) - c <- ioret{n, err} - close(c) - }() - - select { - case r := <-c: - return r.n, r.err - case <-w.ctx.Done(): - return 0, w.ctx.Err() - } -} - -type Reader interface { - io.Reader -} - -type ctxReader struct { - r io.Reader - ctx context.Context -} - -// NewReader wraps a reader to make it respect given Context. -// If there is a blocking read, the returned Reader will return -// whenever the context is cancelled (the return values are n=0 -// and err=ctx.Err().) -// -// Note well: this wrapper DOES NOT ACTUALLY cancel the underlying -// write-- there is no way to do that with the standard go io -// interface. So the read and write _will_ happen or hang. So, use -// this sparingly, make sure to cancel the read or write as necesary -// (e.g. closing a connection whose context is up, etc.) -// -// Furthermore, in order to protect your memory from being read -// _before_ you've cancelled the context, this io.Reader will -// allocate a buffer of the same size, and **copy** into the client's -// if the read succeeds in time. 
-func NewReader(ctx context.Context, r io.Reader) *ctxReader { - return &ctxReader{ctx: ctx, r: r} -} - -func (r *ctxReader) Read(buf []byte) (int, error) { - buf2 := make([]byte, len(buf)) - - c := make(chan ioret, 1) - - go func() { - n, err := r.r.Read(buf2) - c <- ioret{n, err} - close(c) - }() - - select { - case ret := <-c: - copy(buf, buf2) - return ret.n, ret.err - case <-r.ctx.Done(): - return 0, r.ctx.Err() - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/.codecov.yml b/ui/wasm/vendor/github.com/json-iterator/go/.codecov.yml deleted file mode 100644 index 955dc0be5fa..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/.codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -ignore: - - "output_tests/.*" - diff --git a/ui/wasm/vendor/github.com/json-iterator/go/.gitignore b/ui/wasm/vendor/github.com/json-iterator/go/.gitignore deleted file mode 100644 index 15556530a85..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/vendor -/bug_test.go -/coverage.txt -/.idea diff --git a/ui/wasm/vendor/github.com/json-iterator/go/.travis.yml b/ui/wasm/vendor/github.com/json-iterator/go/.travis.yml deleted file mode 100644 index 449e67cd01a..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go - -go: - - 1.8.x - - 1.x - -before_install: - - go get -t -v ./... - -script: - - ./test.sh - -after_success: - - bash <(curl -s https://codecov.io/bash) diff --git a/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.lock b/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.lock deleted file mode 100644 index c8a9fbb3871..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.lock +++ /dev/null @@ -1,21 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - name = "github.com/modern-go/concurrent" - packages = ["."] - revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" - version = "1.0.0" - -[[projects]] - name = "github.com/modern-go/reflect2" - packages = ["."] - revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" - version = "1.0.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.toml b/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.toml deleted file mode 100644 index 313a0f887b6..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/Gopkg.toml +++ /dev/null @@ -1,26 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - -ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] - -[[constraint]] - name = "github.com/modern-go/reflect2" - version = "1.0.1" diff --git a/ui/wasm/vendor/github.com/json-iterator/go/LICENSE b/ui/wasm/vendor/github.com/json-iterator/go/LICENSE deleted file mode 100644 index 2cf4f5ab28e..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 json-iterator - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/ui/wasm/vendor/github.com/json-iterator/go/README.md b/ui/wasm/vendor/github.com/json-iterator/go/README.md deleted file mode 100644 index 52b111d5f36..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/README.md +++ /dev/null @@ -1,87 +0,0 @@ -[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://pkg.go.dev/github.com/json-iterator/go) -[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) -[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) -[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) -[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) -[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) - -A high-performance 100% compatible drop-in replacement of "encoding/json" - -You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) - -# Benchmark - -![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) - -Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go - -Raw Result (easyjson requires static code generation) - -| | ns/op | allocation bytes | allocation times | -| --------------- | ----------- | ---------------- | ---------------- | -| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | -| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | -| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | -| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | -| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | -| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | - -Always benchmark with your own workload. -The result depends heavily on the data input. - -# Usage - -100% compatibility with standard lib - -Replace - -```go -import "encoding/json" -json.Marshal(&data) -``` - -with - -```go -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Marshal(&data) -``` - -Replace - -```go -import "encoding/json" -json.Unmarshal(input, &data) -``` - -with - -```go -import jsoniter "github.com/json-iterator/go" - -var json = jsoniter.ConfigCompatibleWithStandardLibrary -json.Unmarshal(input, &data) -``` - -[More documentation](http://jsoniter.com/migrate-from-go-std.html) - -# How to get - -``` -go get github.com/json-iterator/go -``` - -# Contribution Welcomed ! 
- -Contributors - -- [thockin](https://github.com/thockin) -- [mattn](https://github.com/mattn) -- [cch123](https://github.com/cch123) -- [Oleg Shaldybin](https://github.com/olegshaldybin) -- [Jason Toffaletti](https://github.com/toffaletti) - -Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/ui/wasm/vendor/github.com/json-iterator/go/adapter.go b/ui/wasm/vendor/github.com/json-iterator/go/adapter.go deleted file mode 100644 index 92d2cc4a3dd..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/adapter.go +++ /dev/null @@ -1,150 +0,0 @@ -package jsoniter - -import ( - "bytes" - "io" -) - -// RawMessage to make replace json with jsoniter -type RawMessage []byte - -// Unmarshal adapts to json/encoding Unmarshal API -// -// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. -// Refer to https://godoc.org/encoding/json#Unmarshal for more information -func Unmarshal(data []byte, v interface{}) error { - return ConfigDefault.Unmarshal(data, v) -} - -// UnmarshalFromString is a convenient method to read from string instead of []byte -func UnmarshalFromString(str string, v interface{}) error { - return ConfigDefault.UnmarshalFromString(str, v) -} - -// Get quick method to get value from deeply nested JSON structure -func Get(data []byte, path ...interface{}) Any { - return ConfigDefault.Get(data, path...) -} - -// Marshal adapts to json/encoding Marshal API -// -// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API -// Refer to https://godoc.org/encoding/json#Marshal for more information -func Marshal(v interface{}) ([]byte, error) { - return ConfigDefault.Marshal(v) -} - -// MarshalIndent same as json.MarshalIndent. Prefix is not supported. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - return ConfigDefault.MarshalIndent(v, prefix, indent) -} - -// MarshalToString convenient method to write as string instead of []byte -func MarshalToString(v interface{}) (string, error) { - return ConfigDefault.MarshalToString(v) -} - -// NewDecoder adapts to json/stream NewDecoder API. -// -// NewDecoder returns a new decoder that reads from r. -// -// Instead of a json/encoding Decoder, an Decoder is returned -// Refer to https://godoc.org/encoding/json#NewDecoder for more information -func NewDecoder(reader io.Reader) *Decoder { - return ConfigDefault.NewDecoder(reader) -} - -// Decoder reads and decodes JSON values from an input stream. -// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) -type Decoder struct { - iter *Iterator -} - -// Decode decode JSON into interface{} -func (adapter *Decoder) Decode(obj interface{}) error { - if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { - if !adapter.iter.loadMore() { - return io.EOF - } - } - adapter.iter.ReadVal(obj) - err := adapter.iter.Error - if err == io.EOF { - return nil - } - return adapter.iter.Error -} - -// More is there more? 
-func (adapter *Decoder) More() bool { - iter := adapter.iter - if iter.Error != nil { - return false - } - c := iter.nextToken() - if c == 0 { - return false - } - iter.unreadByte() - return c != ']' && c != '}' -} - -// Buffered remaining buffer -func (adapter *Decoder) Buffered() io.Reader { - remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] - return bytes.NewReader(remaining) -} - -// UseNumber causes the Decoder to unmarshal a number into an interface{} as a -// Number instead of as a float64. -func (adapter *Decoder) UseNumber() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.UseNumber = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (adapter *Decoder) DisallowUnknownFields() { - cfg := adapter.iter.cfg.configBeforeFrozen - cfg.DisallowUnknownFields = true - adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) -} - -// NewEncoder same as json.NewEncoder -func NewEncoder(writer io.Writer) *Encoder { - return ConfigDefault.NewEncoder(writer) -} - -// Encoder same as json.Encoder -type Encoder struct { - stream *Stream -} - -// Encode encode interface{} as JSON to io.Writer -func (adapter *Encoder) Encode(val interface{}) error { - adapter.stream.WriteVal(val) - adapter.stream.WriteRaw("\n") - adapter.stream.Flush() - return adapter.stream.Error -} - -// SetIndent set the indention. Prefix is not supported -func (adapter *Encoder) SetIndent(prefix, indent string) { - config := adapter.stream.cfg.configBeforeFrozen - config.IndentionStep = len(indent) - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// SetEscapeHTML escape html by default, set to false to disable -func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { - config := adapter.stream.cfg.configBeforeFrozen - config.EscapeHTML = escapeHTML - adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) -} - -// Valid reports whether data is a valid JSON encoding. -func Valid(data []byte) bool { - return ConfigDefault.Valid(data) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any.go b/ui/wasm/vendor/github.com/json-iterator/go/any.go deleted file mode 100644 index f6b8aeab0a1..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any.go +++ /dev/null @@ -1,325 +0,0 @@ -package jsoniter - -import ( - "errors" - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "strconv" - "unsafe" -) - -// Any generic object representation. -// The lazy json implementation holds []byte and parse lazily. 
-type Any interface { - LastError() error - ValueType() ValueType - MustBeValid() Any - ToBool() bool - ToInt() int - ToInt32() int32 - ToInt64() int64 - ToUint() uint - ToUint32() uint32 - ToUint64() uint64 - ToFloat32() float32 - ToFloat64() float64 - ToString() string - ToVal(val interface{}) - Get(path ...interface{}) Any - Size() int - Keys() []string - GetInterface() interface{} - WriteTo(stream *Stream) -} - -type baseAny struct{} - -func (any *baseAny) Get(path ...interface{}) Any { - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *baseAny) Size() int { - return 0 -} - -func (any *baseAny) Keys() []string { - return []string{} -} - -func (any *baseAny) ToVal(obj interface{}) { - panic("not implemented") -} - -// WrapInt32 turn int32 into Any interface -func WrapInt32(val int32) Any { - return &int32Any{baseAny{}, val} -} - -// WrapInt64 turn int64 into Any interface -func WrapInt64(val int64) Any { - return &int64Any{baseAny{}, val} -} - -// WrapUint32 turn uint32 into Any interface -func WrapUint32(val uint32) Any { - return &uint32Any{baseAny{}, val} -} - -// WrapUint64 turn uint64 into Any interface -func WrapUint64(val uint64) Any { - return &uint64Any{baseAny{}, val} -} - -// WrapFloat64 turn float64 into Any interface -func WrapFloat64(val float64) Any { - return &floatAny{baseAny{}, val} -} - -// WrapString turn string into Any interface -func WrapString(val string) Any { - return &stringAny{baseAny{}, val} -} - -// Wrap turn a go object into Any interface -func Wrap(val interface{}) Any { - if val == nil { - return &nilAny{} - } - asAny, isAny := val.(Any) - if isAny { - return asAny - } - typ := reflect2.TypeOf(val) - switch typ.Kind() { - case reflect.Slice: - return wrapArray(val) - case reflect.Struct: - return wrapStruct(val) - case reflect.Map: - return wrapMap(val) - case reflect.String: - return WrapString(val.(string)) - case reflect.Int: - if strconv.IntSize == 32 { - return WrapInt32(int32(val.(int))) - } - return WrapInt64(int64(val.(int))) - case reflect.Int8: - return WrapInt32(int32(val.(int8))) - case reflect.Int16: - return WrapInt32(int32(val.(int16))) - case reflect.Int32: - return WrapInt32(val.(int32)) - case reflect.Int64: - return WrapInt64(val.(int64)) - case reflect.Uint: - if strconv.IntSize == 32 { - return WrapUint32(uint32(val.(uint))) - } - return WrapUint64(uint64(val.(uint))) - case reflect.Uintptr: - if ptrSize == 32 { - return WrapUint32(uint32(val.(uintptr))) - } - return WrapUint64(uint64(val.(uintptr))) - case reflect.Uint8: - return WrapUint32(uint32(val.(uint8))) - case reflect.Uint16: - return WrapUint32(uint32(val.(uint16))) - case reflect.Uint32: - return WrapUint32(uint32(val.(uint32))) - case reflect.Uint64: - return WrapUint64(val.(uint64)) - case reflect.Float32: - return WrapFloat64(float64(val.(float32))) - case reflect.Float64: - return WrapFloat64(val.(float64)) - case reflect.Bool: - if val.(bool) == true { - return &trueAny{} - } - return &falseAny{} - } - return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} -} - -// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
-func (iter *Iterator) ReadAny() Any { - return iter.readAny() -} - -func (iter *Iterator) readAny() Any { - c := iter.nextToken() - switch c { - case '"': - iter.unreadByte() - return &stringAny{baseAny{}, iter.ReadString()} - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - return &nilAny{} - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - return &trueAny{} - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - return &falseAny{} - case '{': - return iter.readObjectAny() - case '[': - return iter.readArrayAny() - case '-': - return iter.readNumberAny(false) - case 0: - return &invalidAny{baseAny{}, errors.New("input is empty")} - default: - return iter.readNumberAny(true) - } -} - -func (iter *Iterator) readNumberAny(positive bool) Any { - iter.startCapture(iter.head - 1) - iter.skipNumber() - lazyBuf := iter.stopCapture() - return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readObjectAny() Any { - iter.startCapture(iter.head - 1) - iter.skipObject() - lazyBuf := iter.stopCapture() - return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func (iter *Iterator) readArrayAny() Any { - iter.startCapture(iter.head - 1) - iter.skipArray() - lazyBuf := iter.stopCapture() - return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} -} - -func locateObjectField(iter *Iterator, target string) []byte { - var found []byte - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - if field == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - return true - }) - return found -} - -func locateArrayElement(iter *Iterator, target int) []byte { - var found []byte - n := 0 - iter.ReadArrayCB(func(iter *Iterator) bool { - if n == target { - found = iter.SkipAndReturnBytes() - return false - } - iter.Skip() - n++ - return true - }) - return found -} - -func locatePath(iter *Iterator, path []interface{}) Any { - for i, pathKeyObj := range path { - switch pathKey := pathKeyObj.(type) { - case string: - valueBytes := locateObjectField(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int: - valueBytes := locateArrayElement(iter, pathKey) - if valueBytes == nil { - return newInvalidAny(path[i:]) - } - iter.ResetBytes(valueBytes) - case int32: - if '*' == pathKey { - return iter.readAny().Get(path[i:]...) 
- } - return newInvalidAny(path[i:]) - default: - return newInvalidAny(path[i:]) - } - } - if iter.Error != nil && iter.Error != io.EOF { - return &invalidAny{baseAny{}, iter.Error} - } - return iter.readAny() -} - -var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() - -func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == anyType { - return &directAnyCodec{} - } - if typ.Implements(anyType) { - return &anyCodec{ - valType: typ, - } - } - return nil -} - -type anyCodec struct { - valType reflect2.Type -} - -func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - panic("not implemented") -} - -func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - any.WriteTo(stream) -} - -func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { - obj := codec.valType.UnsafeIndirect(ptr) - any := obj.(Any) - return any.Size() == 0 -} - -type directAnyCodec struct { -} - -func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *(*Any)(ptr) = iter.readAny() -} - -func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - any := *(*Any)(ptr) - if any == nil { - stream.WriteNil() - return - } - any.WriteTo(stream) -} - -func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { - any := *(*Any)(ptr) - return any.Size() == 0 -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_array.go b/ui/wasm/vendor/github.com/json-iterator/go/any_array.go deleted file mode 100644 index 0449e9aa428..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_array.go +++ /dev/null @@ -1,278 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type arrayLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *arrayLazyAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayLazyAny) MustBeValid() Any { - return any -} - -func (any *arrayLazyAny) LastError() error { - return any.err -} - -func (any *arrayLazyAny) ToBool() bool { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.ReadArray() -} - -func (any *arrayLazyAny) ToInt() int { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt32() int32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToInt64() int64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint() uint { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint32() uint32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToUint64() uint64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat32() float32 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToFloat64() float64 { - if any.ToBool() { - return 1 - } - return 0 -} - -func (any *arrayLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *arrayLazyAny) ToVal(val interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(val) -} - -func (any *arrayLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - iter := 
any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateArrayElement(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - arr := make([]Any, 0) - iter.ReadArrayCB(func(iter *Iterator) bool { - found := iter.readAny().Get(path[1:]...) - if found.ValueType() != InvalidValue { - arr = append(arr, found) - } - return true - }) - return wrapArray(arr) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadArrayCB(func(iter *Iterator) bool { - size++ - iter.Skip() - return true - }) - return size -} - -func (any *arrayLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *arrayLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type arrayAny struct { - baseAny - val reflect.Value -} - -func wrapArray(val interface{}) *arrayAny { - return &arrayAny{baseAny{}, reflect.ValueOf(val)} -} - -func (any *arrayAny) ValueType() ValueType { - return ArrayValue -} - -func (any *arrayAny) MustBeValid() Any { - return any -} - -func (any *arrayAny) LastError() error { - return nil -} - -func (any *arrayAny) ToBool() bool { - return any.val.Len() != 0 -} - -func (any *arrayAny) ToInt() int { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt32() int32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToInt64() int64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint() uint { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint32() uint32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToUint64() uint64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat32() float32 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToFloat64() float64 { - if any.val.Len() == 0 { - return 0 - } - return 1 -} - -func (any *arrayAny) ToString() string { - str, _ := MarshalToString(any.val.Interface()) - return str -} - -func (any *arrayAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int: - if firstPath < 0 || firstPath >= any.val.Len() { - return newInvalidAny(path) - } - return Wrap(any.val.Index(firstPath).Interface()) - case int32: - if '*' == firstPath { - mappedAll := make([]Any, 0) - for i := 0; i < any.val.Len(); i++ { - mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll = append(mappedAll, mapped) - } - } - return wrapArray(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *arrayAny) Size() int { - return any.val.Len() -} - -func (any *arrayAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *arrayAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_bool.go b/ui/wasm/vendor/github.com/json-iterator/go/any_bool.go deleted file mode 100644 index 9452324af5b..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_bool.go +++ /dev/null @@ -1,137 +0,0 @@ -package jsoniter - -type trueAny struct { - baseAny -} - -func (any *trueAny) LastError() error { - return nil -} - -func (any *trueAny) ToBool() bool { - return true -} - -func (any *trueAny) ToInt() int { - return 1 -} - -func (any *trueAny) ToInt32() int32 { - return 1 -} - -func (any *trueAny) ToInt64() int64 { - return 1 -} - -func (any *trueAny) ToUint() uint { - return 1 -} - -func (any *trueAny) ToUint32() uint32 { - return 1 -} - -func (any *trueAny) ToUint64() uint64 { - return 1 -} - -func (any *trueAny) ToFloat32() float32 { - return 1 -} - -func (any *trueAny) ToFloat64() float64 { - return 1 -} - -func (any *trueAny) ToString() string { - return "true" -} - -func (any *trueAny) WriteTo(stream *Stream) { - stream.WriteTrue() -} - -func (any *trueAny) Parse() *Iterator { - return nil -} - -func (any *trueAny) GetInterface() interface{} { - return true -} - -func (any *trueAny) ValueType() ValueType { - return BoolValue -} - -func (any *trueAny) MustBeValid() Any { - return any -} - -type falseAny struct { - baseAny -} - -func (any *falseAny) LastError() error { - return nil -} - -func (any *falseAny) ToBool() bool { - return false -} - -func (any *falseAny) ToInt() int { - return 0 -} - -func (any *falseAny) ToInt32() int32 { - return 0 -} - -func (any *falseAny) ToInt64() int64 { - return 0 -} - -func (any *falseAny) ToUint() uint { - return 0 -} - -func (any *falseAny) ToUint32() uint32 { - return 0 -} - -func (any *falseAny) ToUint64() uint64 { - return 0 -} - -func (any *falseAny) ToFloat32() float32 { - return 0 -} - -func (any *falseAny) ToFloat64() float64 { - return 0 -} - -func (any *falseAny) ToString() string { - return "false" -} - -func (any *falseAny) WriteTo(stream *Stream) { - stream.WriteFalse() -} - -func (any *falseAny) Parse() *Iterator { - return nil -} - -func (any *falseAny) GetInterface() interface{} { - return false -} - -func (any *falseAny) ValueType() ValueType { - return BoolValue -} - -func (any *falseAny) MustBeValid() Any { - return any -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_float.go b/ui/wasm/vendor/github.com/json-iterator/go/any_float.go deleted file mode 100644 index 35fdb09497f..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_float.go +++ /dev/null @@ -1,83 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type floatAny struct { - baseAny - val float64 -} - -func (any *floatAny) Parse() *Iterator { - return nil -} - -func (any *floatAny) ValueType() ValueType { - return NumberValue -} - -func (any *floatAny) MustBeValid() Any { - return any -} - -func (any *floatAny) LastError() error { - return nil -} - -func (any *floatAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *floatAny) ToInt() int { - return int(any.val) -} - -func (any *floatAny) ToInt32() int32 { - return int32(any.val) -} - 
-func (any *floatAny) ToInt64() int64 { - return int64(any.val) -} - -func (any *floatAny) ToUint() uint { - if any.val > 0 { - return uint(any.val) - } - return 0 -} - -func (any *floatAny) ToUint32() uint32 { - if any.val > 0 { - return uint32(any.val) - } - return 0 -} - -func (any *floatAny) ToUint64() uint64 { - if any.val > 0 { - return uint64(any.val) - } - return 0 -} - -func (any *floatAny) ToFloat32() float32 { - return float32(any.val) -} - -func (any *floatAny) ToFloat64() float64 { - return any.val -} - -func (any *floatAny) ToString() string { - return strconv.FormatFloat(any.val, 'E', -1, 64) -} - -func (any *floatAny) WriteTo(stream *Stream) { - stream.WriteFloat64(any.val) -} - -func (any *floatAny) GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_int32.go b/ui/wasm/vendor/github.com/json-iterator/go/any_int32.go deleted file mode 100644 index 1b56f399150..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_int32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int32Any struct { - baseAny - val int32 -} - -func (any *int32Any) LastError() error { - return nil -} - -func (any *int32Any) ValueType() ValueType { - return NumberValue -} - -func (any *int32Any) MustBeValid() Any { - return any -} - -func (any *int32Any) ToBool() bool { - return any.val != 0 -} - -func (any *int32Any) ToInt() int { - return int(any.val) -} - -func (any *int32Any) ToInt32() int32 { - return any.val -} - -func (any *int32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *int32Any) ToUint() uint { - return uint(any.val) -} - -func (any *int32Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *int32Any) WriteTo(stream *Stream) { - stream.WriteInt32(any.val) -} - -func (any *int32Any) Parse() *Iterator { - return nil -} - -func (any *int32Any) GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_int64.go b/ui/wasm/vendor/github.com/json-iterator/go/any_int64.go deleted file mode 100644 index c440d72b6d3..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_int64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type int64Any struct { - baseAny - val int64 -} - -func (any *int64Any) LastError() error { - return nil -} - -func (any *int64Any) ValueType() ValueType { - return NumberValue -} - -func (any *int64Any) MustBeValid() Any { - return any -} - -func (any *int64Any) ToBool() bool { - return any.val != 0 -} - -func (any *int64Any) ToInt() int { - return int(any.val) -} - -func (any *int64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *int64Any) ToInt64() int64 { - return any.val -} - -func (any *int64Any) ToUint() uint { - return uint(any.val) -} - -func (any *int64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *int64Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *int64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *int64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *int64Any) ToString() string { - return strconv.FormatInt(any.val, 10) -} - -func (any 
*int64Any) WriteTo(stream *Stream) { - stream.WriteInt64(any.val) -} - -func (any *int64Any) Parse() *Iterator { - return nil -} - -func (any *int64Any) GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_invalid.go b/ui/wasm/vendor/github.com/json-iterator/go/any_invalid.go deleted file mode 100644 index 1d859eac327..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_invalid.go +++ /dev/null @@ -1,82 +0,0 @@ -package jsoniter - -import "fmt" - -type invalidAny struct { - baseAny - err error -} - -func newInvalidAny(path []interface{}) *invalidAny { - return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} -} - -func (any *invalidAny) LastError() error { - return any.err -} - -func (any *invalidAny) ValueType() ValueType { - return InvalidValue -} - -func (any *invalidAny) MustBeValid() Any { - panic(any.err) -} - -func (any *invalidAny) ToBool() bool { - return false -} - -func (any *invalidAny) ToInt() int { - return 0 -} - -func (any *invalidAny) ToInt32() int32 { - return 0 -} - -func (any *invalidAny) ToInt64() int64 { - return 0 -} - -func (any *invalidAny) ToUint() uint { - return 0 -} - -func (any *invalidAny) ToUint32() uint32 { - return 0 -} - -func (any *invalidAny) ToUint64() uint64 { - return 0 -} - -func (any *invalidAny) ToFloat32() float32 { - return 0 -} - -func (any *invalidAny) ToFloat64() float64 { - return 0 -} - -func (any *invalidAny) ToString() string { - return "" -} - -func (any *invalidAny) WriteTo(stream *Stream) { -} - -func (any *invalidAny) Get(path ...interface{}) Any { - if any.err == nil { - return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} - } - return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} -} - -func (any *invalidAny) Parse() *Iterator { - return nil -} - -func (any *invalidAny) GetInterface() interface{} { - return nil -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_nil.go b/ui/wasm/vendor/github.com/json-iterator/go/any_nil.go deleted file mode 100644 index d04cb54c11c..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_nil.go +++ /dev/null @@ -1,69 +0,0 @@ -package jsoniter - -type nilAny struct { - baseAny -} - -func (any *nilAny) LastError() error { - return nil -} - -func (any *nilAny) ValueType() ValueType { - return NilValue -} - -func (any *nilAny) MustBeValid() Any { - return any -} - -func (any *nilAny) ToBool() bool { - return false -} - -func (any *nilAny) ToInt() int { - return 0 -} - -func (any *nilAny) ToInt32() int32 { - return 0 -} - -func (any *nilAny) ToInt64() int64 { - return 0 -} - -func (any *nilAny) ToUint() uint { - return 0 -} - -func (any *nilAny) ToUint32() uint32 { - return 0 -} - -func (any *nilAny) ToUint64() uint64 { - return 0 -} - -func (any *nilAny) ToFloat32() float32 { - return 0 -} - -func (any *nilAny) ToFloat64() float64 { - return 0 -} - -func (any *nilAny) ToString() string { - return "" -} - -func (any *nilAny) WriteTo(stream *Stream) { - stream.WriteNil() -} - -func (any *nilAny) Parse() *Iterator { - return nil -} - -func (any *nilAny) GetInterface() interface{} { - return nil -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_number.go b/ui/wasm/vendor/github.com/json-iterator/go/any_number.go deleted file mode 100644 index 9d1e901a66a..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_number.go +++ /dev/null @@ -1,123 +0,0 @@ -package jsoniter - -import ( - "io" - "unsafe" -) - -type numberLazyAny struct { - baseAny 
- cfg *frozenConfig - buf []byte - err error -} - -func (any *numberLazyAny) ValueType() ValueType { - return NumberValue -} - -func (any *numberLazyAny) MustBeValid() Any { - return any -} - -func (any *numberLazyAny) LastError() error { - return any.err -} - -func (any *numberLazyAny) ToBool() bool { - return any.ToFloat64() != 0 -} - -func (any *numberLazyAny) ToInt() int { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt32() int32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToInt64() int64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadInt64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint() uint { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint32() uint32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToUint64() uint64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadUint64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat32() float32 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat32() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToFloat64() float64 { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - val := iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - any.err = iter.Error - } - return val -} - -func (any *numberLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *numberLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *numberLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_object.go b/ui/wasm/vendor/github.com/json-iterator/go/any_object.go deleted file mode 100644 index c44ef5c989a..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_object.go +++ /dev/null @@ -1,374 +0,0 @@ -package jsoniter - -import ( - "reflect" - "unsafe" -) - -type objectLazyAny struct { - baseAny - cfg *frozenConfig - buf []byte - err error -} - -func (any *objectLazyAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectLazyAny) MustBeValid() Any { - return any -} - -func (any *objectLazyAny) LastError() error { - return any.err -} - -func (any *objectLazyAny) ToBool() bool { - return true -} - -func (any *objectLazyAny) ToInt() int { - return 0 -} - -func (any *objectLazyAny) ToInt32() int32 { - return 0 -} - -func (any *objectLazyAny) ToInt64() int64 { - return 0 -} - -func (any *objectLazyAny) ToUint() 
uint { - return 0 -} - -func (any *objectLazyAny) ToUint32() uint32 { - return 0 -} - -func (any *objectLazyAny) ToUint64() uint64 { - return 0 -} - -func (any *objectLazyAny) ToFloat32() float32 { - return 0 -} - -func (any *objectLazyAny) ToFloat64() float64 { - return 0 -} - -func (any *objectLazyAny) ToString() string { - return *(*string)(unsafe.Pointer(&any.buf)) -} - -func (any *objectLazyAny) ToVal(obj interface{}) { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadVal(obj) -} - -func (any *objectLazyAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - valueBytes := locateObjectField(iter, firstPath) - if valueBytes == nil { - return newInvalidAny(path) - } - iter.ResetBytes(valueBytes) - return locatePath(iter, path[1:]) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - mapped := locatePath(iter, path[1:]) - if mapped.ValueType() != InvalidValue { - mappedAll[field] = mapped - } - return true - }) - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectLazyAny) Keys() []string { - keys := []string{} - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadMapCB(func(iter *Iterator, field string) bool { - iter.Skip() - keys = append(keys, field) - return true - }) - return keys -} - -func (any *objectLazyAny) Size() int { - size := 0 - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - size++ - return true - }) - return size -} - -func (any *objectLazyAny) WriteTo(stream *Stream) { - stream.Write(any.buf) -} - -func (any *objectLazyAny) GetInterface() interface{} { - iter := any.cfg.BorrowIterator(any.buf) - defer any.cfg.ReturnIterator(iter) - return iter.Read() -} - -type objectAny struct { - baseAny - err error - val reflect.Value -} - -func wrapStruct(val interface{}) *objectAny { - return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *objectAny) ValueType() ValueType { - return ObjectValue -} - -func (any *objectAny) MustBeValid() Any { - return any -} - -func (any *objectAny) Parse() *Iterator { - return nil -} - -func (any *objectAny) LastError() error { - return any.err -} - -func (any *objectAny) ToBool() bool { - return any.val.NumField() != 0 -} - -func (any *objectAny) ToInt() int { - return 0 -} - -func (any *objectAny) ToInt32() int32 { - return 0 -} - -func (any *objectAny) ToInt64() int64 { - return 0 -} - -func (any *objectAny) ToUint() uint { - return 0 -} - -func (any *objectAny) ToUint32() uint32 { - return 0 -} - -func (any *objectAny) ToUint64() uint64 { - return 0 -} - -func (any *objectAny) ToFloat32() float32 { - return 0 -} - -func (any *objectAny) ToFloat64() float64 { - return 0 -} - -func (any *objectAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *objectAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case string: - field := any.val.FieldByName(firstPath) - if !field.IsValid() { - return newInvalidAny(path) - } - return 
Wrap(field.Interface()) - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for i := 0; i < any.val.NumField(); i++ { - field := any.val.Field(i) - if field.CanInterface() { - mapped := Wrap(field.Interface()).Get(path[1:]...) - if mapped.ValueType() != InvalidValue { - mappedAll[any.val.Type().Field(i).Name] = mapped - } - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - return newInvalidAny(path) - } -} - -func (any *objectAny) Keys() []string { - keys := make([]string, 0, any.val.NumField()) - for i := 0; i < any.val.NumField(); i++ { - keys = append(keys, any.val.Type().Field(i).Name) - } - return keys -} - -func (any *objectAny) Size() int { - return any.val.NumField() -} - -func (any *objectAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *objectAny) GetInterface() interface{} { - return any.val.Interface() -} - -type mapAny struct { - baseAny - err error - val reflect.Value -} - -func wrapMap(val interface{}) *mapAny { - return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} -} - -func (any *mapAny) ValueType() ValueType { - return ObjectValue -} - -func (any *mapAny) MustBeValid() Any { - return any -} - -func (any *mapAny) Parse() *Iterator { - return nil -} - -func (any *mapAny) LastError() error { - return any.err -} - -func (any *mapAny) ToBool() bool { - return true -} - -func (any *mapAny) ToInt() int { - return 0 -} - -func (any *mapAny) ToInt32() int32 { - return 0 -} - -func (any *mapAny) ToInt64() int64 { - return 0 -} - -func (any *mapAny) ToUint() uint { - return 0 -} - -func (any *mapAny) ToUint32() uint32 { - return 0 -} - -func (any *mapAny) ToUint64() uint64 { - return 0 -} - -func (any *mapAny) ToFloat32() float32 { - return 0 -} - -func (any *mapAny) ToFloat64() float64 { - return 0 -} - -func (any *mapAny) ToString() string { - str, err := MarshalToString(any.val.Interface()) - any.err = err - return str -} - -func (any *mapAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - switch firstPath := path[0].(type) { - case int32: - if '*' == firstPath { - mappedAll := map[string]Any{} - for _, key := range any.val.MapKeys() { - keyAsStr := key.String() - element := Wrap(any.val.MapIndex(key).Interface()) - mapped := element.Get(path[1:]...) 
- if mapped.ValueType() != InvalidValue { - mappedAll[keyAsStr] = mapped - } - } - return wrapMap(mappedAll) - } - return newInvalidAny(path) - default: - value := any.val.MapIndex(reflect.ValueOf(firstPath)) - if !value.IsValid() { - return newInvalidAny(path) - } - return Wrap(value.Interface()) - } -} - -func (any *mapAny) Keys() []string { - keys := make([]string, 0, any.val.Len()) - for _, key := range any.val.MapKeys() { - keys = append(keys, key.String()) - } - return keys -} - -func (any *mapAny) Size() int { - return any.val.Len() -} - -func (any *mapAny) WriteTo(stream *Stream) { - stream.WriteVal(any.val) -} - -func (any *mapAny) GetInterface() interface{} { - return any.val.Interface() -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_str.go b/ui/wasm/vendor/github.com/json-iterator/go/any_str.go deleted file mode 100644 index 1f12f6612de..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_str.go +++ /dev/null @@ -1,166 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strconv" -) - -type stringAny struct { - baseAny - val string -} - -func (any *stringAny) Get(path ...interface{}) Any { - if len(path) == 0 { - return any - } - return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} -} - -func (any *stringAny) Parse() *Iterator { - return nil -} - -func (any *stringAny) ValueType() ValueType { - return StringValue -} - -func (any *stringAny) MustBeValid() Any { - return any -} - -func (any *stringAny) LastError() error { - return nil -} - -func (any *stringAny) ToBool() bool { - str := any.ToString() - if str == "0" { - return false - } - for _, c := range str { - switch c { - case ' ', '\n', '\r', '\t': - default: - return true - } - } - return false -} - -func (any *stringAny) ToInt() int { - return int(any.ToInt64()) - -} - -func (any *stringAny) ToInt32() int32 { - return int32(any.ToInt64()) -} - -func (any *stringAny) ToInt64() int64 { - if any.val == "" { - return 0 - } - - flag := 1 - startPos := 0 - if any.val[0] == '+' || any.val[0] == '-' { - startPos = 1 - } - - if any.val[0] == '-' { - flag = -1 - } - - endPos := startPos - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) - return int64(flag) * parsed -} - -func (any *stringAny) ToUint() uint { - return uint(any.ToUint64()) -} - -func (any *stringAny) ToUint32() uint32 { - return uint32(any.ToUint64()) -} - -func (any *stringAny) ToUint64() uint64 { - if any.val == "" { - return 0 - } - - startPos := 0 - - if any.val[0] == '-' { - return 0 - } - if any.val[0] == '+' { - startPos = 1 - } - - endPos := startPos - for i := startPos; i < len(any.val); i++ { - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - break - } - } - parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) - return parsed -} - -func (any *stringAny) ToFloat32() float32 { - return float32(any.ToFloat64()) -} - -func (any *stringAny) ToFloat64() float64 { - if len(any.val) == 0 { - return 0 - } - - // first char invalid - if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { - return 0 - } - - // extract valid num expression from string - // eg 123true => 123, -12.12xxa => -12.12 - endPos := 1 - for i := 1; i < len(any.val); i++ { - if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { - endPos = i + 1 - continue - } - - // end position is the first char which is not digit - if any.val[i] >= '0' && any.val[i] <= '9' { - endPos = i + 1 - } else { - endPos = i - break - } - } - parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) - return parsed -} - -func (any *stringAny) ToString() string { - return any.val -} - -func (any *stringAny) WriteTo(stream *Stream) { - stream.WriteString(any.val) -} - -func (any *stringAny) GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_uint32.go b/ui/wasm/vendor/github.com/json-iterator/go/any_uint32.go deleted file mode 100644 index 656bbd33d7e..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_uint32.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint32Any struct { - baseAny - val uint32 -} - -func (any *uint32Any) LastError() error { - return nil -} - -func (any *uint32Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint32Any) MustBeValid() Any { - return any -} - -func (any *uint32Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint32Any) ToInt() int { - return int(any.val) -} - -func (any *uint32Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint32Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint32Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint32Any) ToUint32() uint32 { - return any.val -} - -func (any *uint32Any) ToUint64() uint64 { - return uint64(any.val) -} - -func (any *uint32Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint32Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint32Any) ToString() string { - return strconv.FormatInt(int64(any.val), 10) -} - -func (any *uint32Any) WriteTo(stream *Stream) { - stream.WriteUint32(any.val) -} - -func (any *uint32Any) Parse() *Iterator { - return nil -} - -func (any *uint32Any) GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/any_uint64.go b/ui/wasm/vendor/github.com/json-iterator/go/any_uint64.go deleted file mode 100644 index 7df2fce33ba..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/any_uint64.go +++ /dev/null @@ -1,74 +0,0 @@ -package jsoniter - -import ( - "strconv" -) - -type uint64Any struct { - baseAny - val uint64 -} - -func (any *uint64Any) LastError() error { - return nil -} - -func (any *uint64Any) ValueType() ValueType { - return NumberValue -} - -func (any *uint64Any) MustBeValid() Any { - return any -} - -func (any *uint64Any) ToBool() bool { - return any.val != 0 -} - -func (any *uint64Any) ToInt() int { - return int(any.val) -} - -func (any *uint64Any) ToInt32() int32 { - return int32(any.val) -} - -func (any *uint64Any) ToInt64() int64 { - return int64(any.val) -} - -func (any *uint64Any) ToUint() uint { - return uint(any.val) -} - -func (any *uint64Any) ToUint32() uint32 { - return uint32(any.val) -} - -func (any *uint64Any) ToUint64() uint64 { - return any.val -} - -func (any *uint64Any) ToFloat32() float32 { - return float32(any.val) -} - -func (any *uint64Any) ToFloat64() float64 { - return float64(any.val) -} - -func (any *uint64Any) ToString() string { - return strconv.FormatUint(any.val, 10) -} - -func (any *uint64Any) WriteTo(stream *Stream) { - stream.WriteUint64(any.val) -} - -func (any *uint64Any) Parse() *Iterator { - return nil -} - -func (any *uint64Any) 
GetInterface() interface{} { - return any.val -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/build.sh b/ui/wasm/vendor/github.com/json-iterator/go/build.sh deleted file mode 100644 index b45ef688313..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/build.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e -set -x - -if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then - mkdir -p /tmp/build-golang/src/github.com/json-iterator - ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go -fi -export GOPATH=/tmp/build-golang -go get -u github.com/golang/dep/cmd/dep -cd /tmp/build-golang/src/github.com/json-iterator/go -exec $GOPATH/bin/dep ensure -update diff --git a/ui/wasm/vendor/github.com/json-iterator/go/config.go b/ui/wasm/vendor/github.com/json-iterator/go/config.go deleted file mode 100644 index 2adcdc3b790..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/config.go +++ /dev/null @@ -1,375 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "reflect" - "sync" - "unsafe" - - "github.com/modern-go/concurrent" - "github.com/modern-go/reflect2" -) - -// Config customize how the API should behave. -// The API is created from Config by Froze. -type Config struct { - IndentionStep int - MarshalFloatWith6Digits bool - EscapeHTML bool - SortMapKeys bool - UseNumber bool - DisallowUnknownFields bool - TagKey string - OnlyTaggedField bool - ValidateJsonRawMessage bool - ObjectFieldMustBeSimpleString bool - CaseSensitive bool -} - -// API the public interface of this package. -// Primary Marshal and Unmarshal. -type API interface { - IteratorPool - StreamPool - MarshalToString(v interface{}) (string, error) - Marshal(v interface{}) ([]byte, error) - MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) - UnmarshalFromString(str string, v interface{}) error - Unmarshal(data []byte, v interface{}) error - Get(data []byte, path ...interface{}) Any - NewEncoder(writer io.Writer) *Encoder - NewDecoder(reader io.Reader) *Decoder - Valid(data []byte) bool - RegisterExtension(extension Extension) - DecoderOf(typ reflect2.Type) ValDecoder - EncoderOf(typ reflect2.Type) ValEncoder -} - -// ConfigDefault the default API -var ConfigDefault = Config{ - EscapeHTML: true, -}.Froze() - -// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior -var ConfigCompatibleWithStandardLibrary = Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, -}.Froze() - -// ConfigFastest marshals float with only 6 digits precision -var ConfigFastest = Config{ - EscapeHTML: false, - MarshalFloatWith6Digits: true, // will lose precession - ObjectFieldMustBeSimpleString: true, // do not unescape object field -}.Froze() - -type frozenConfig struct { - configBeforeFrozen Config - sortMapKeys bool - indentionStep int - objectFieldMustBeSimpleString bool - onlyTaggedField bool - disallowUnknownFields bool - decoderCache *concurrent.Map - encoderCache *concurrent.Map - encoderExtension Extension - decoderExtension Extension - extraExtensions []Extension - streamPool *sync.Pool - iteratorPool *sync.Pool - caseSensitive bool -} - -func (cfg *frozenConfig) initCache() { - cfg.decoderCache = concurrent.NewMap() - cfg.encoderCache = concurrent.NewMap() -} - -func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { - cfg.decoderCache.Store(cacheKey, decoder) -} - -func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { - 
cfg.encoderCache.Store(cacheKey, encoder) -} - -func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { - decoder, found := cfg.decoderCache.Load(cacheKey) - if found { - return decoder.(ValDecoder) - } - return nil -} - -func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { - encoder, found := cfg.encoderCache.Load(cacheKey) - if found { - return encoder.(ValEncoder) - } - return nil -} - -var cfgCache = concurrent.NewMap() - -func getFrozenConfigFromCache(cfg Config) *frozenConfig { - obj, found := cfgCache.Load(cfg) - if found { - return obj.(*frozenConfig) - } - return nil -} - -func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { - cfgCache.Store(cfg, frozenConfig) -} - -// Froze forge API from config -func (cfg Config) Froze() API { - api := &frozenConfig{ - sortMapKeys: cfg.SortMapKeys, - indentionStep: cfg.IndentionStep, - objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, - onlyTaggedField: cfg.OnlyTaggedField, - disallowUnknownFields: cfg.DisallowUnknownFields, - caseSensitive: cfg.CaseSensitive, - } - api.streamPool = &sync.Pool{ - New: func() interface{} { - return NewStream(api, nil, 512) - }, - } - api.iteratorPool = &sync.Pool{ - New: func() interface{} { - return NewIterator(api) - }, - } - api.initCache() - encoderExtension := EncoderExtension{} - decoderExtension := DecoderExtension{} - if cfg.MarshalFloatWith6Digits { - api.marshalFloatWith6Digits(encoderExtension) - } - if cfg.EscapeHTML { - api.escapeHTML(encoderExtension) - } - if cfg.UseNumber { - api.useNumber(decoderExtension) - } - if cfg.ValidateJsonRawMessage { - api.validateJsonRawMessage(encoderExtension) - } - api.encoderExtension = encoderExtension - api.decoderExtension = decoderExtension - api.configBeforeFrozen = cfg - return api -} - -func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { - api := getFrozenConfigFromCache(cfg) - if api != nil { - return api - } - api = cfg.Froze().(*frozenConfig) - for _, extension := range extraExtensions { - api.RegisterExtension(extension) - } - addFrozenConfigToCache(cfg, api) - return api -} - -func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { - encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { - rawMessage := *(*json.RawMessage)(ptr) - iter := cfg.BorrowIterator([]byte(rawMessage)) - defer cfg.ReturnIterator(iter) - iter.Read() - if iter.Error != nil && iter.Error != io.EOF { - stream.WriteRaw("null") - } else { - stream.WriteRaw(string(rawMessage)) - } - }, func(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 - }} - extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder - extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder -} - -func (cfg *frozenConfig) useNumber(extension DecoderExtension) { - extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { - exitingValue := *((*interface{})(ptr)) - if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { - iter.ReadVal(exitingValue) - return - } - if iter.WhatIsNext() == NumberValue { - *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) - } else { - *((*interface{})(ptr)) = iter.Read() - } - }} -} -func (cfg *frozenConfig) getTagKey() string { - tagKey := cfg.configBeforeFrozen.TagKey - if tagKey == "" { - return "json" - } - return tagKey -} - -func (cfg *frozenConfig) RegisterExtension(extension Extension) { - 
cfg.extraExtensions = append(cfg.extraExtensions, extension) - copied := cfg.configBeforeFrozen - cfg.configBeforeFrozen = copied -} - -type lossyFloat32Encoder struct { -} - -func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32Lossy(*((*float32)(ptr))) -} - -func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type lossyFloat64Encoder struct { -} - -func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64Lossy(*((*float64)(ptr))) -} - -func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -// EnableLossyFloatMarshalling keeps 10**(-6) precision -// for float variables for better performance. -func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { - // for better performance - extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} - extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} -} - -type htmlEscapedStringEncoder struct { -} - -func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteStringWithHTMLEscaped(str) -} - -func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { - encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} -} - -func (cfg *frozenConfig) cleanDecoders() { - typeDecoders = map[string]ValDecoder{} - fieldDecoders = map[string]ValDecoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) cleanEncoders() { - typeEncoders = map[string]ValEncoder{} - fieldEncoders = map[string]ValEncoder{} - *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) -} - -func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return "", stream.Error - } - return string(stream.Buffer()), nil -} - -func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { - stream := cfg.BorrowStream(nil) - defer cfg.ReturnStream(stream) - stream.WriteVal(v) - if stream.Error != nil { - return nil, stream.Error - } - result := stream.Buffer() - copied := make([]byte, len(result)) - copy(copied, result) - return copied, nil -} - -func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - if prefix != "" { - panic("prefix is not supported") - } - for _, r := range indent { - if r != ' ' { - panic("indent can only be space") - } - } - newCfg := cfg.configBeforeFrozen - newCfg.IndentionStep = len(indent) - return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) -} - -func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { - data := []byte(str) - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - return locatePath(iter, path) -} - -func (cfg *frozenConfig) Unmarshal(data []byte, v 
interface{}) error { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.ReadVal(v) - c := iter.nextToken() - if c == 0 { - if iter.Error == io.EOF { - return nil - } - return iter.Error - } - iter.ReportError("Unmarshal", "there are bytes left after unmarshal") - return iter.Error -} - -func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { - stream := NewStream(cfg, writer, 512) - return &Encoder{stream} -} - -func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { - iter := Parse(cfg, reader, 512) - return &Decoder{iter} -} - -func (cfg *frozenConfig) Valid(data []byte) bool { - iter := cfg.BorrowIterator(data) - defer cfg.ReturnIterator(iter) - iter.Skip() - return iter.Error == nil -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/ui/wasm/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md deleted file mode 100644 index 3095662b061..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md +++ /dev/null @@ -1,7 +0,0 @@ -| json type \ dest type | bool | int | uint | float |string| -| --- | --- | --- | --- |--|--|
-| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
-| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
-| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
-| object | true | 0 | 0 |0|original json|
-| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/>
    [1,2] => 1|original json| \ No newline at end of file diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter.go b/ui/wasm/vendor/github.com/json-iterator/go/iter.go deleted file mode 100644 index 29b31cf7895..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter.go +++ /dev/null @@ -1,349 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "fmt" - "io" -) - -// ValueType the type for JSON element -type ValueType int - -const ( - // InvalidValue invalid JSON element - InvalidValue ValueType = iota - // StringValue JSON element "string" - StringValue - // NumberValue JSON element 100 or 0.10 - NumberValue - // NilValue JSON element null - NilValue - // BoolValue JSON element true or false - BoolValue - // ArrayValue JSON element [] - ArrayValue - // ObjectValue JSON element {} - ObjectValue -) - -var hexDigits []byte -var valueTypes []ValueType - -func init() { - hexDigits = make([]byte, 256) - for i := 0; i < len(hexDigits); i++ { - hexDigits[i] = 255 - } - for i := '0'; i <= '9'; i++ { - hexDigits[i] = byte(i - '0') - } - for i := 'a'; i <= 'f'; i++ { - hexDigits[i] = byte((i - 'a') + 10) - } - for i := 'A'; i <= 'F'; i++ { - hexDigits[i] = byte((i - 'A') + 10) - } - valueTypes = make([]ValueType, 256) - for i := 0; i < len(valueTypes); i++ { - valueTypes[i] = InvalidValue - } - valueTypes['"'] = StringValue - valueTypes['-'] = NumberValue - valueTypes['0'] = NumberValue - valueTypes['1'] = NumberValue - valueTypes['2'] = NumberValue - valueTypes['3'] = NumberValue - valueTypes['4'] = NumberValue - valueTypes['5'] = NumberValue - valueTypes['6'] = NumberValue - valueTypes['7'] = NumberValue - valueTypes['8'] = NumberValue - valueTypes['9'] = NumberValue - valueTypes['t'] = BoolValue - valueTypes['f'] = BoolValue - valueTypes['n'] = NilValue - valueTypes['['] = ArrayValue - valueTypes['{'] = ObjectValue -} - -// Iterator is a io.Reader like object, with JSON specific read functions. -// Error is not returned as return value, but stored as Error member on this iterator instance. 
-type Iterator struct { - cfg *frozenConfig - reader io.Reader - buf []byte - head int - tail int - depth int - captureStartedAt int - captured []byte - Error error - Attachment interface{} // open for customized decoder -} - -// NewIterator creates an empty Iterator instance -func NewIterator(cfg API) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: nil, - head: 0, - tail: 0, - depth: 0, - } -} - -// Parse creates an Iterator instance from io.Reader -func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: reader, - buf: make([]byte, bufSize), - head: 0, - tail: 0, - depth: 0, - } -} - -// ParseBytes creates an Iterator instance from byte array -func ParseBytes(cfg API, input []byte) *Iterator { - return &Iterator{ - cfg: cfg.(*frozenConfig), - reader: nil, - buf: input, - head: 0, - tail: len(input), - depth: 0, - } -} - -// ParseString creates an Iterator instance from string -func ParseString(cfg API, input string) *Iterator { - return ParseBytes(cfg, []byte(input)) -} - -// Pool returns a pool can provide more iterator with same configuration -func (iter *Iterator) Pool() IteratorPool { - return iter.cfg -} - -// Reset reuse iterator instance by specifying another reader -func (iter *Iterator) Reset(reader io.Reader) *Iterator { - iter.reader = reader - iter.head = 0 - iter.tail = 0 - iter.depth = 0 - return iter -} - -// ResetBytes reuse iterator instance by specifying another byte array as input -func (iter *Iterator) ResetBytes(input []byte) *Iterator { - iter.reader = nil - iter.buf = input - iter.head = 0 - iter.tail = len(input) - iter.depth = 0 - return iter -} - -// WhatIsNext gets ValueType of relatively next json element -func (iter *Iterator) WhatIsNext() ValueType { - valueType := valueTypes[iter.nextToken()] - iter.unreadByte() - return valueType -} - -func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i - return false - } - return true -} - -func (iter *Iterator) isObjectEnd() bool { - c := iter.nextToken() - if c == ',' { - return false - } - if c == '}' { - return true - } - iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) - return true -} - -func (iter *Iterator) nextToken() byte { - // a variation of skip whitespaces, returning the next non-whitespace token - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\t', '\r': - continue - } - iter.head = i + 1 - return c - } - if !iter.loadMore() { - return 0 - } - } -} - -// ReportError record a error in iterator instance with current position. 
-func (iter *Iterator) ReportError(operation string, msg string) { - if iter.Error != nil { - if iter.Error != io.EOF { - return - } - } - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - peekEnd := iter.head + 10 - if peekEnd > iter.tail { - peekEnd = iter.tail - } - parsing := string(iter.buf[peekStart:peekEnd]) - contextStart := iter.head - 50 - if contextStart < 0 { - contextStart = 0 - } - contextEnd := iter.head + 50 - if contextEnd > iter.tail { - contextEnd = iter.tail - } - context := string(iter.buf[contextStart:contextEnd]) - iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", - operation, msg, iter.head-peekStart, parsing, context) -} - -// CurrentBuffer gets current buffer as string for debugging purpose -func (iter *Iterator) CurrentBuffer() string { - peekStart := iter.head - 10 - if peekStart < 0 { - peekStart = 0 - } - return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, - string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) -} - -func (iter *Iterator) readByte() (ret byte) { - if iter.head == iter.tail { - if iter.loadMore() { - ret = iter.buf[iter.head] - iter.head++ - return ret - } - return 0 - } - ret = iter.buf[iter.head] - iter.head++ - return ret -} - -func (iter *Iterator) loadMore() bool { - if iter.reader == nil { - if iter.Error == nil { - iter.head = iter.tail - iter.Error = io.EOF - } - return false - } - if iter.captured != nil { - iter.captured = append(iter.captured, - iter.buf[iter.captureStartedAt:iter.tail]...) - iter.captureStartedAt = 0 - } - for { - n, err := iter.reader.Read(iter.buf) - if n == 0 { - if err != nil { - if iter.Error == nil { - iter.Error = err - } - return false - } - } else { - iter.head = 0 - iter.tail = n - return true - } - } -} - -func (iter *Iterator) unreadByte() { - if iter.Error != nil { - return - } - iter.head-- - return -} - -// Read read the next JSON element as generic interface{}. 
-func (iter *Iterator) Read() interface{} { - valueType := iter.WhatIsNext() - switch valueType { - case StringValue: - return iter.ReadString() - case NumberValue: - if iter.cfg.configBeforeFrozen.UseNumber { - return json.Number(iter.readNumberAsString()) - } - return iter.ReadFloat64() - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - return nil - case BoolValue: - return iter.ReadBool() - case ArrayValue: - arr := []interface{}{} - iter.ReadArrayCB(func(iter *Iterator) bool { - var elem interface{} - iter.ReadVal(&elem) - arr = append(arr, elem) - return true - }) - return arr - case ObjectValue: - obj := map[string]interface{}{} - iter.ReadMapCB(func(Iter *Iterator, field string) bool { - var elem interface{} - iter.ReadVal(&elem) - obj[field] = elem - return true - }) - return obj - default: - iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) - return nil - } -} - -// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 -const maxDepth = 10000 - -func (iter *Iterator) incrementDepth() (success bool) { - iter.depth++ - if iter.depth <= maxDepth { - return true - } - iter.ReportError("incrementDepth", "exceeded max depth") - return false -} - -func (iter *Iterator) decrementDepth() (success bool) { - iter.depth-- - if iter.depth >= 0 { - return true - } - iter.ReportError("decrementDepth", "unexpected negative nesting") - return false -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_array.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_array.go deleted file mode 100644 index 204fe0e0922..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_array.go +++ /dev/null @@ -1,64 +0,0 @@ -package jsoniter - -// ReadArray read array element, tells if the array has more element to read. 
-func (iter *Iterator) ReadArray() (ret bool) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return false // null - case '[': - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - return true - } - return false - case ']': - return false - case ',': - return true - default: - iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) - return - } -} - -// ReadArrayCB read array with callback -func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { - c := iter.nextToken() - if c == '[' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c != ']' { - iter.unreadByte() - if !callback(iter) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - if !callback(iter) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != ']' { - iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - return iter.decrementDepth() - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) - return false -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_float.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_float.go deleted file mode 100644 index 8a3d8b6fb43..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_float.go +++ /dev/null @@ -1,342 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "io" - "math/big" - "strconv" - "strings" - "unsafe" -) - -var floatDigits []int8 - -const invalidCharForNumber = int8(-1) -const endOfNumber = int8(-2) -const dotInNumber = int8(-3) - -func init() { - floatDigits = make([]int8, 256) - for i := 0; i < len(floatDigits); i++ { - floatDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - floatDigits[i] = i - int8('0') - } - floatDigits[','] = endOfNumber - floatDigits[']'] = endOfNumber - floatDigits['}'] = endOfNumber - floatDigits[' '] = endOfNumber - floatDigits['\t'] = endOfNumber - floatDigits['\n'] = endOfNumber - floatDigits['.'] = dotInNumber -} - -// ReadBigFloat read big.Float -func (iter *Iterator) ReadBigFloat() (ret *big.Float) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - prec := 64 - if len(str) > prec { - prec = len(str) - } - val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) - if err != nil { - iter.Error = err - return nil - } - return val -} - -// ReadBigInt read big.Int -func (iter *Iterator) ReadBigInt() (ret *big.Int) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return nil - } - ret = big.NewInt(0) - var success bool - ret, success = ret.SetString(str, 10) - if !success { - iter.ReportError("ReadBigInt", "invalid big int") - return nil - } - return ret -} - -//ReadFloat32 read float32 -func (iter *Iterator) ReadFloat32() (ret float32) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat32() - } - iter.unreadByte() - return iter.readPositiveFloat32() -} - -func (iter *Iterator) readPositiveFloat32() (ret float32) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - 
iter.ReportError("readFloat32", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat32", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat32SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat32", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot -non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat32SlowPath() - case endOfNumber: - iter.head = i - return float32(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat32SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float32(float64(value) / float64(pow10[decimalPlaces])) - } - // too many decimal places - return iter.readFloat32SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat32SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat32SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - } - } - return iter.readFloat32SlowPath() -} - -func (iter *Iterator) readNumberAsString() (ret string) { - strBuf := [16]byte{} - str := strBuf[0:0] -load_loop: - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - str = append(str, c) - continue - default: - iter.head = i - break load_loop - } - } - if !iter.loadMore() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF { - return - } - if len(str) == 0 { - iter.ReportError("readNumberAsString", "invalid number") - } - return *(*string)(unsafe.Pointer(&str)) -} - -func (iter *Iterator) readFloat32SlowPath() (ret float32) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat32SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 32) - if err != nil { - iter.Error = err - return - } - return float32(val) -} - -// ReadFloat64 read float64 -func (iter *Iterator) ReadFloat64() (ret float64) { - c := iter.nextToken() - if c == '-' { - return -iter.readPositiveFloat64() - } - iter.unreadByte() - return iter.readPositiveFloat64() -} - -func (iter *Iterator) readPositiveFloat64() (ret float64) { - i := iter.head - // first char - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c := iter.buf[i] - i++ - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.ReportError("readFloat64", "empty number") - return - case dotInNumber: - iter.ReportError("readFloat64", "leading dot is invalid") - return - case 0: - if i == iter.tail { - return iter.readFloat64SlowPath() - } - c = iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.ReportError("readFloat64", "leading zero is invalid") - return - } - } - value := uint64(ind) - // chars before dot 
-non_decimal_loop: - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case invalidCharForNumber: - return iter.readFloat64SlowPath() - case endOfNumber: - iter.head = i - return float64(value) - case dotInNumber: - break non_decimal_loop - } - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; - } - // chars after dot - if c == '.' { - i++ - decimalPlaces := 0 - if i == iter.tail { - return iter.readFloat64SlowPath() - } - for ; i < iter.tail; i++ { - c = iter.buf[i] - ind := floatDigits[c] - switch ind { - case endOfNumber: - if decimalPlaces > 0 && decimalPlaces < len(pow10) { - iter.head = i - return float64(value) / float64(pow10[decimalPlaces]) - } - // too many decimal places - return iter.readFloat64SlowPath() - case invalidCharForNumber, dotInNumber: - return iter.readFloat64SlowPath() - } - decimalPlaces++ - if value > uint64SafeToMultiple10 { - return iter.readFloat64SlowPath() - } - value = (value << 3) + (value << 1) + uint64(ind) - if value > maxFloat64 { - return iter.readFloat64SlowPath() - } - } - } - return iter.readFloat64SlowPath() -} - -func (iter *Iterator) readFloat64SlowPath() (ret float64) { - str := iter.readNumberAsString() - if iter.Error != nil && iter.Error != io.EOF { - return - } - errMsg := validateFloat(str) - if errMsg != "" { - iter.ReportError("readFloat64SlowPath", errMsg) - return - } - val, err := strconv.ParseFloat(str, 64) - if err != nil { - iter.Error = err - return - } - return val -} - -func validateFloat(str string) string { - // strconv.ParseFloat is not validating `1.` or `1.e1` - if len(str) == 0 { - return "empty number" - } - if str[0] == '-' { - return "-- is not valid" - } - dotPos := strings.IndexByte(str, '.') - if dotPos != -1 { - if dotPos == len(str)-1 { - return "dot can not be last character" - } - switch str[dotPos+1] { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - return "missing digit after dot" - } - } - return "" -} - -// ReadNumber read json.Number -func (iter *Iterator) ReadNumber() (ret json.Number) { - return json.Number(iter.readNumberAsString()) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_int.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_int.go deleted file mode 100644 index d786a89fe1a..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_int.go +++ /dev/null @@ -1,346 +0,0 @@ -package jsoniter - -import ( - "math" - "strconv" -) - -var intDigits []int8 - -const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 -const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 -const maxFloat64 = 1<<53 - 1 - -func init() { - intDigits = make([]int8, 256) - for i := 0; i < len(intDigits); i++ { - intDigits[i] = invalidCharForNumber - } - for i := int8('0'); i <= int8('9'); i++ { - intDigits[i] = i - int8('0') - } -} - -// ReadUint read uint -func (iter *Iterator) ReadUint() uint { - if strconv.IntSize == 32 { - return uint(iter.ReadUint32()) - } - return uint(iter.ReadUint64()) -} - -// ReadInt read int -func (iter *Iterator) ReadInt() int { - if strconv.IntSize == 32 { - return int(iter.ReadInt32()) - } - return int(iter.ReadInt64()) -} - -// ReadInt8 read int8 -func (iter *Iterator) ReadInt8() (ret int8) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt8+1 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return 
-int8(val) - } - val := iter.readUint32(c) - if val > math.MaxInt8 { - iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int8(val) -} - -// ReadUint8 read uint8 -func (iter *Iterator) ReadUint8() (ret uint8) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint8 { - iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint8(val) -} - -// ReadInt16 read int16 -func (iter *Iterator) ReadInt16() (ret int16) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt16+1 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int16(val) - } - val := iter.readUint32(c) - if val > math.MaxInt16 { - iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int16(val) -} - -// ReadUint16 read uint16 -func (iter *Iterator) ReadUint16() (ret uint16) { - val := iter.readUint32(iter.nextToken()) - if val > math.MaxUint16 { - iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return uint16(val) -} - -// ReadInt32 read int32 -func (iter *Iterator) ReadInt32() (ret int32) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint32(iter.readByte()) - if val > math.MaxInt32+1 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return -int32(val) - } - val := iter.readUint32(c) - if val > math.MaxInt32 { - iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) - return - } - return int32(val) -} - -// ReadUint32 read uint32 -func (iter *Iterator) ReadUint32() (ret uint32) { - return iter.readUint32(iter.nextToken()) -} - -func (iter *Iterator) readUint32(c byte) (ret uint32) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint32(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint32(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint32(ind2)*10 + uint32(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + 
uint32(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint32SafeToMultiply10 { - value2 := (value << 3) + (value << 1) + uint32(ind) - if value2 < value { - iter.ReportError("readUint32", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint32(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -// ReadInt64 read int64 -func (iter *Iterator) ReadInt64() (ret int64) { - c := iter.nextToken() - if c == '-' { - val := iter.readUint64(iter.readByte()) - if val > math.MaxInt64+1 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return -int64(val) - } - val := iter.readUint64(c) - if val > math.MaxInt64 { - iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) - return - } - return int64(val) -} - -// ReadUint64 read uint64 -func (iter *Iterator) ReadUint64() uint64 { - return iter.readUint64(iter.nextToken()) -} - -func (iter *Iterator) readUint64(c byte) (ret uint64) { - ind := intDigits[c] - if ind == 0 { - iter.assertInteger() - return 0 // single zero - } - if ind == invalidCharForNumber { - iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) - return - } - value := uint64(ind) - if iter.tail-iter.head > 10 { - i := iter.head - ind2 := intDigits[iter.buf[i]] - if ind2 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - i++ - ind3 := intDigits[iter.buf[i]] - if ind3 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10 + uint64(ind2) - } - //iter.head = i + 1 - //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) - i++ - ind4 := intDigits[iter.buf[i]] - if ind4 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100 + uint64(ind2)*10 + uint64(ind3) - } - i++ - ind5 := intDigits[iter.buf[i]] - if ind5 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) - } - i++ - ind6 := intDigits[iter.buf[i]] - if ind6 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) - } - i++ - ind7 := intDigits[iter.buf[i]] - if ind7 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) - } - i++ - ind8 := intDigits[iter.buf[i]] - if ind8 == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) - } - i++ - ind9 := intDigits[iter.buf[i]] - value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) - iter.head = i - if ind9 == invalidCharForNumber { - iter.assertInteger() - return value - } - } - for { - for i := iter.head; i < iter.tail; i++ { - 
ind = intDigits[iter.buf[i]] - if ind == invalidCharForNumber { - iter.head = i - iter.assertInteger() - return value - } - if value > uint64SafeToMultiple10 { - value2 := (value << 3) + (value << 1) + uint64(ind) - if value2 < value { - iter.ReportError("readUint64", "overflow") - return - } - value = value2 - continue - } - value = (value << 3) + (value << 1) + uint64(ind) - } - if !iter.loadMore() { - iter.assertInteger() - return value - } - } -} - -func (iter *Iterator) assertInteger() { - if iter.head < iter.tail && iter.buf[iter.head] == '.' { - iter.ReportError("assertInteger", "can not decode float as int") - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_object.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_object.go deleted file mode 100644 index 58ee89c849e..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_object.go +++ /dev/null @@ -1,267 +0,0 @@ -package jsoniter - -import ( - "fmt" - "strings" -) - -// ReadObject read one field from object. -// If object ended, returns empty string. -// Otherwise, returns the field name. -func (iter *Iterator) ReadObject() (ret string) { - c := iter.nextToken() - switch c { - case 'n': - iter.skipThreeBytes('u', 'l', 'l') - return "" // null - case '{': - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - } - if c == '}' { - return "" // end of object - } - iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) - return - case ',': - field := iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - return field - case '}': - return "" // end of object - default: - iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) - return - } -} - -// CaseInsensitive -func (iter *Iterator) readFieldHash() int64 { - hash := int64(0x811c9dc5) - c := iter.nextToken() - if c != '"' { - iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) - return 0 - } - for { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - b := iter.buf[i] - if b == '\\' { - iter.head = i - for _, b := range iter.readStringSlowPath() { - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if b == '"' { - iter.head = i + 1 - c = iter.nextToken() - if c != ':' { - iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) - return 0 - } - return hash - } - if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { - b += 'a' - 'A' - } - hash ^= int64(b) - hash *= 0x1000193 - } - if !iter.loadMore() { - iter.ReportError("readFieldHash", `incomplete field name`) - return 0 - } - } -} - -func calcHash(str string, caseSensitive bool) int64 { - if !caseSensitive { - str = strings.ToLower(str) - } - hash := int64(0x811c9dc5) - for _, b := range []byte(str) { - hash ^= int64(b) - hash *= 0x1000193 - } - return int64(hash) -} - -// ReadObjectCB read object with callback, the key is ascii only and field name not copied -func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { - c := 
iter.nextToken() - var field string - if c == '{' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadObjectCB", `object not ended with }`) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - if c == '}' { - return iter.decrementDepth() - } - iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) - iter.decrementDepth() - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -// ReadMapCB read map with callback, the key can be any string -func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { - c := iter.nextToken() - if c == '{' { - if !iter.incrementDepth() { - return false - } - c = iter.nextToken() - if c == '"' { - iter.unreadByte() - field := iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - for c == ',' { - field = iter.ReadString() - if iter.nextToken() != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - iter.decrementDepth() - return false - } - if !callback(iter, field) { - iter.decrementDepth() - return false - } - c = iter.nextToken() - } - if c != '}' { - iter.ReportError("ReadMapCB", `object not ended with }`) - iter.decrementDepth() - return false - } - return iter.decrementDepth() - } - if c == '}' { - return iter.decrementDepth() - } - iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) - iter.decrementDepth() - return false - } - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return true // null - } - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectStart() bool { - c := iter.nextToken() - if c == '{' { - c = iter.nextToken() - if c == '}' { - return false - } - iter.unreadByte() - return true - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return false - } - iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) - return false -} - -func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { - str := iter.ReadStringAsSlice() - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if !iter.loadMore() { - return - } - } - if iter.buf[iter.head] != ':' { - iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) - return - } - iter.head++ - if iter.skipWhitespacesWithoutLoadMore() { - if ret == nil { - ret = make([]byte, len(str)) - copy(ret, str) - } - if 
!iter.loadMore() { - return - } - } - if ret == nil { - return str - } - return ret -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_skip.go deleted file mode 100644 index e91eefb15be..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip.go +++ /dev/null @@ -1,130 +0,0 @@ -package jsoniter - -import "fmt" - -// ReadNil reads a json object as nil and -// returns whether it's a nil or not -func (iter *Iterator) ReadNil() (ret bool) { - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') // null - return true - } - iter.unreadByte() - return false -} - -// ReadBool reads a json object as BoolValue -func (iter *Iterator) ReadBool() (ret bool) { - c := iter.nextToken() - if c == 't' { - iter.skipThreeBytes('r', 'u', 'e') - return true - } - if c == 'f' { - iter.skipFourBytes('a', 'l', 's', 'e') - return false - } - iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) - return -} - -// SkipAndReturnBytes skip next JSON element, and return its content as []byte. -// The []byte can be kept, it is a copy of data. -func (iter *Iterator) SkipAndReturnBytes() []byte { - iter.startCapture(iter.head) - iter.Skip() - return iter.stopCapture() -} - -// SkipAndAppendBytes skips next JSON element and appends its content to -// buffer, returning the result. -func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { - iter.startCaptureTo(buf, iter.head) - iter.Skip() - return iter.stopCapture() -} - -func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { - if iter.captured != nil { - panic("already in capture mode") - } - iter.captureStartedAt = captureStartedAt - iter.captured = buf -} - -func (iter *Iterator) startCapture(captureStartedAt int) { - iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) -} - -func (iter *Iterator) stopCapture() []byte { - if iter.captured == nil { - panic("not in capture mode") - } - captured := iter.captured - remaining := iter.buf[iter.captureStartedAt:iter.head] - iter.captureStartedAt = -1 - iter.captured = nil - return append(captured, remaining...) 
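// Note: capture mode is what makes SkipAndReturnBytes safe to hold onto; the
// returned slice is a copy, not a view into the iterator's buffer. A small
// usage sketch against the public API (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	input := `{"keep":{"a":1,"b":[2,3]},"drop":true}`
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, input)
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "keep" {
			raw := iter.SkipAndReturnBytes() // copied, safe to keep
			fmt.Printf("raw sub-document: %s\n", raw)
			continue
		}
		iter.Skip() // discard other values
	}
}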
-} - -// Skip skips a json object and positions to relatively the next json object -func (iter *Iterator) Skip() { - c := iter.nextToken() - switch c { - case '"': - iter.skipString() - case 'n': - iter.skipThreeBytes('u', 'l', 'l') // null - case 't': - iter.skipThreeBytes('r', 'u', 'e') // true - case 'f': - iter.skipFourBytes('a', 'l', 's', 'e') // false - case '0': - iter.unreadByte() - iter.ReadFloat32() - case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': - iter.skipNumber() - case '[': - iter.skipArray() - case '{': - iter.skipObject() - default: - iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) - return - } -} - -func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } - if iter.readByte() != b4 { - iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) - return - } -} - -func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { - if iter.readByte() != b1 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b2 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } - if iter.readByte() != b3 { - iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) - return - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_sloppy.go deleted file mode 100644 index 9303de41e40..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ /dev/null @@ -1,163 +0,0 @@ -//+build jsoniter_sloppy - -package jsoniter - -// sloppy but faster implementation, do not validate the input json - -func (iter *Iterator) skipNumber() { - for { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case ' ', '\n', '\r', '\t', ',', '}', ']': - iter.head = i - return - } - } - if !iter.loadMore() { - return - } - } -} - -func (iter *Iterator) skipArray() { - level := 1 - if !iter.incrementDepth() { - return - } - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '[': // If open symbol, increase level - level++ - if !iter.incrementDepth() { - return - } - case ']': // If close symbol, increase level - level-- - if !iter.decrementDepth() { - return - } - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete array") - return - } - } -} - -func (iter *Iterator) skipObject() { - level := 1 - if !iter.incrementDepth() { - return - } - - for { - for i := iter.head; i < iter.tail; i++ { - switch iter.buf[i] { - case '"': // If inside string, skip it - iter.head = i + 1 - iter.skipString() - i = iter.head - 1 // it will be i++ soon - case '{': // If open symbol, increase level - level++ - if !iter.incrementDepth() { - return - } - case '}': // If close symbol, increase level - level-- - if 
!iter.decrementDepth() { - return - } - - // If we have returned to the original level, we're done - if level == 0 { - iter.head = i + 1 - return - } - } - } - if !iter.loadMore() { - iter.ReportError("skipObject", "incomplete object") - return - } - } -} - -func (iter *Iterator) skipString() { - for { - end, escaped := iter.findStringEnd() - if end == -1 { - if !iter.loadMore() { - iter.ReportError("skipString", "incomplete string") - return - } - if escaped { - iter.head = 1 // skip the first char as last char read is \ - } - } else { - iter.head = end - return - } - } -} - -// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go -// Tries to find the end of string -// Support if string contains escaped quote symbols. -func (iter *Iterator) findStringEnd() (int, bool) { - escaped := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - if !escaped { - return i + 1, false - } - j := i - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return i + 1, true - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - } - } else if c == '\\' { - escaped = true - } - } - j := iter.tail - 1 - for { - if j < iter.head || iter.buf[j] != '\\' { - // even number of backslashes - // either end of buffer, or " found - return -1, false // do not end with \ - } - j-- - if j < iter.head || iter.buf[j] != '\\' { - // odd number of backslashes - // it is \" or \\\" - break - } - j-- - - } - return -1, true // end with \ -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_strict.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_strict.go deleted file mode 100644 index 6cf66d0438d..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_skip_strict.go +++ /dev/null @@ -1,99 +0,0 @@ -//+build !jsoniter_sloppy - -package jsoniter - -import ( - "fmt" - "io" -) - -func (iter *Iterator) skipNumber() { - if !iter.trySkipNumber() { - iter.unreadByte() - if iter.Error != nil && iter.Error != io.EOF { - return - } - iter.ReadFloat64() - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = nil - iter.ReadBigFloat() - } - } -} - -func (iter *Iterator) trySkipNumber() bool { - dotFound := false - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - case '.': - if dotFound { - iter.ReportError("validateNumber", `more than one dot found in number`) - return true // already failed - } - if i+1 == iter.tail { - return false - } - c = iter.buf[i+1] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - default: - iter.ReportError("validateNumber", `missing digit after dot`) - return true // already failed - } - dotFound = true - default: - switch c { - case ',', ']', '}', ' ', '\t', '\n', '\r': - if iter.head == i { - return false // if - without following digits - } - iter.head = i - return true // must be valid - } - return false // may be invalid - } - } - return false -} - -func (iter *Iterator) skipString() { - if !iter.trySkipString() { - iter.unreadByte() - iter.ReadString() - } -} - -func (iter *Iterator) trySkipString() bool { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - iter.head = i + 1 - return true // valid - } else if c == '\\' { - return false - } else if c < ' ' { - iter.ReportError("trySkipString", - fmt.Sprintf(`invalid control character found: 
%d`, c)) - return true // already failed - } - } - return false -} - -func (iter *Iterator) skipObject() { - iter.unreadByte() - iter.ReadObjectCB(func(iter *Iterator, field string) bool { - iter.Skip() - return true - }) -} - -func (iter *Iterator) skipArray() { - iter.unreadByte() - iter.ReadArrayCB(func(iter *Iterator) bool { - iter.Skip() - return true - }) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/iter_str.go b/ui/wasm/vendor/github.com/json-iterator/go/iter_str.go deleted file mode 100644 index adc487ea804..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/iter_str.go +++ /dev/null @@ -1,215 +0,0 @@ -package jsoniter - -import ( - "fmt" - "unicode/utf16" -) - -// ReadString read string from iterator -func (iter *Iterator) ReadString() (ret string) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - c := iter.buf[i] - if c == '"' { - ret = string(iter.buf[iter.head:i]) - iter.head = i + 1 - return ret - } else if c == '\\' { - break - } else if c < ' ' { - iter.ReportError("ReadString", - fmt.Sprintf(`invalid control character found: %d`, c)) - return - } - } - return iter.readStringSlowPath() - } else if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return "" - } - iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readStringSlowPath() (ret string) { - var str []byte - var c byte - for iter.Error == nil { - c = iter.readByte() - if c == '"' { - return string(str) - } - if c == '\\' { - c = iter.readByte() - str = iter.readEscapedChar(c, str) - } else { - str = append(str, c) - } - } - iter.ReportError("readStringSlowPath", "unexpected end of input") - return -} - -func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { - switch c { - case 'u': - r := iter.readU4() - if utf16.IsSurrogate(r) { - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != '\\' { - iter.unreadByte() - str = appendRune(str, r) - return str - } - c = iter.readByte() - if iter.Error != nil { - return nil - } - if c != 'u' { - str = appendRune(str, r) - return iter.readEscapedChar(c, str) - } - r2 := iter.readU4() - if iter.Error != nil { - return nil - } - combined := utf16.DecodeRune(r, r2) - if combined == '\uFFFD' { - str = appendRune(str, r) - str = appendRune(str, r2) - } else { - str = appendRune(str, combined) - } - } else { - str = appendRune(str, r) - } - case '"': - str = append(str, '"') - case '\\': - str = append(str, '\\') - case '/': - str = append(str, '/') - case 'b': - str = append(str, '\b') - case 'f': - str = append(str, '\f') - case 'n': - str = append(str, '\n') - case 'r': - str = append(str, '\r') - case 't': - str = append(str, '\t') - default: - iter.ReportError("readEscapedChar", - `invalid escape char after \`) - return nil - } - return str -} - -// ReadStringAsSlice read string from iterator without copying into string form. -// The []byte can not be kept, as it will change after next iterator call. 
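// Note: since the returned slice may alias the iterator's internal buffer,
// callers that keep the bytes must copy them before the next iterator call.
// A sketch of the safe pattern (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `["first","second"]`)
	iter.ReadArray()
	view := iter.ReadStringAsSlice()     // may point into the iterator's buffer
	kept := append([]byte(nil), view...) // copy before reading further
	iter.ReadArray()
	_ = iter.ReadStringAsSlice() // may reuse the same underlying bytes
	fmt.Println(string(kept))    // still "first"
}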
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) { - c := iter.nextToken() - if c == '"' { - for i := iter.head; i < iter.tail; i++ { - // require ascii string and no escape - // for: field name, base64, number - if iter.buf[i] == '"' { - // fast path: reuse the underlying buffer - ret = iter.buf[iter.head:i] - iter.head = i + 1 - return ret - } - } - readLen := iter.tail - iter.head - copied := make([]byte, readLen, readLen*2) - copy(copied, iter.buf[iter.head:iter.tail]) - iter.head = iter.tail - for iter.Error == nil { - c := iter.readByte() - if c == '"' { - return copied - } - copied = append(copied, c) - } - return copied - } - iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) - return -} - -func (iter *Iterator) readU4() (ret rune) { - for i := 0; i < 4; i++ { - c := iter.readByte() - if iter.Error != nil { - return - } - if c >= '0' && c <= '9' { - ret = ret*16 + rune(c-'0') - } else if c >= 'a' && c <= 'f' { - ret = ret*16 + rune(c-'a'+10) - } else if c >= 'A' && c <= 'F' { - ret = ret*16 + rune(c-'A'+10) - } else { - iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) - return - } - } - return ret -} - -const ( - t1 = 0x00 // 0000 0000 - tx = 0x80 // 1000 0000 - t2 = 0xC0 // 1100 0000 - t3 = 0xE0 // 1110 0000 - t4 = 0xF0 // 1111 0000 - t5 = 0xF8 // 1111 1000 - - maskx = 0x3F // 0011 1111 - mask2 = 0x1F // 0001 1111 - mask3 = 0x0F // 0000 1111 - mask4 = 0x07 // 0000 0111 - - rune1Max = 1<<7 - 1 - rune2Max = 1<<11 - 1 - rune3Max = 1<<16 - 1 - - surrogateMin = 0xD800 - surrogateMax = 0xDFFF - - maxRune = '\U0010FFFF' // Maximum valid Unicode code point. - runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" -) - -func appendRune(p []byte, r rune) []byte { - // Negative values are erroneous. Making it unsigned addresses the problem. - switch i := uint32(r); { - case i <= rune1Max: - p = append(p, byte(r)) - return p - case i <= rune2Max: - p = append(p, t2|byte(r>>6)) - p = append(p, tx|byte(r)&maskx) - return p - case i > maxRune, surrogateMin <= i && i <= surrogateMax: - r = runeError - fallthrough - case i <= rune3Max: - p = append(p, t3|byte(r>>12)) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - default: - p = append(p, t4|byte(r>>18)) - p = append(p, tx|byte(r>>12)&maskx) - p = append(p, tx|byte(r>>6)&maskx) - p = append(p, tx|byte(r)&maskx) - return p - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/jsoniter.go b/ui/wasm/vendor/github.com/json-iterator/go/jsoniter.go deleted file mode 100644 index c2934f916eb..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/jsoniter.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package jsoniter implements encoding and decoding of JSON as defined in -// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. -// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter -// and variable type declarations (if any). -// jsoniter interfaces gives 100% compatibility with code using standard lib. -// -// "JSON and Go" -// (https://golang.org/doc/articles/json_and_go.html) -// gives a description of how Marshal/Unmarshal operate -// between arbitrary or predefined json objects and bytes, -// and it applies to jsoniter.Marshal/Unmarshal as well. -// -// Besides, jsoniter.Iterator provides a different set of interfaces -// iterating given bytes/string/reader -// and yielding parsed elements one by one. 
-// This set of interfaces reads input as required and gives -// better performance. -package jsoniter diff --git a/ui/wasm/vendor/github.com/json-iterator/go/pool.go b/ui/wasm/vendor/github.com/json-iterator/go/pool.go deleted file mode 100644 index e2389b56cff..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/pool.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsoniter - -import ( - "io" -) - -// IteratorPool a thread safe pool of iterators with same configuration -type IteratorPool interface { - BorrowIterator(data []byte) *Iterator - ReturnIterator(iter *Iterator) -} - -// StreamPool a thread safe pool of streams with same configuration -type StreamPool interface { - BorrowStream(writer io.Writer) *Stream - ReturnStream(stream *Stream) -} - -func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { - stream := cfg.streamPool.Get().(*Stream) - stream.Reset(writer) - return stream -} - -func (cfg *frozenConfig) ReturnStream(stream *Stream) { - stream.out = nil - stream.Error = nil - stream.Attachment = nil - cfg.streamPool.Put(stream) -} - -func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { - iter := cfg.iteratorPool.Get().(*Iterator) - iter.ResetBytes(data) - return iter -} - -func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { - iter.Error = nil - iter.Attachment = nil - cfg.iteratorPool.Put(iter) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect.go deleted file mode 100644 index 39acb320ace..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect.go +++ /dev/null @@ -1,337 +0,0 @@ -package jsoniter - -import ( - "fmt" - "reflect" - "unsafe" - - "github.com/modern-go/reflect2" -) - -// ValDecoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValDecoder with json.Decoder. -// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). -// -// Reflection on type to create decoders, which is then cached -// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions -// 1. create instance of new value, for example *int will need a int to be allocated -// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New -// 3. assignment to map, both key and value will be reflect.Value -// For a simple struct binding, it will be reflect.Value free and allocation free -type ValDecoder interface { - Decode(ptr unsafe.Pointer, iter *Iterator) -} - -// ValEncoder is an internal type registered to cache as needed. -// Don't confuse jsoniter.ValEncoder with json.Encoder. -// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
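// Note: this decoder/encoder machinery is what backs the drop-in Marshal and
// Unmarshal advertised in the package documentation. A typical usage sketch
// (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// ConfigCompatibleWithStandardLibrary mirrors encoding/json behavior
// (sorted map keys, HTML escaping, json.RawMessage validation).
var json = jsoniter.ConfigCompatibleWithStandardLibrary

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age,omitempty"`
}

func main() {
	out, _ := json.Marshal(user{Name: "jane"})
	fmt.Println(string(out)) // {"name":"jane"}

	var u user
	_ = json.Unmarshal([]byte(`{"name":"joe","age":7}`), &u)
	fmt.Printf("%+v\n", u) // {Name:joe Age:7}
}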
-type ValEncoder interface { - IsEmpty(ptr unsafe.Pointer) bool - Encode(ptr unsafe.Pointer, stream *Stream) -} - -type checkIsEmpty interface { - IsEmpty(ptr unsafe.Pointer) bool -} - -type ctx struct { - *frozenConfig - prefix string - encoders map[reflect2.Type]ValEncoder - decoders map[reflect2.Type]ValDecoder -} - -func (b *ctx) caseSensitive() bool { - if b.frozenConfig == nil { - // default is case-insensitive - return false - } - return b.frozenConfig.caseSensitive -} - -func (b *ctx) append(prefix string) *ctx { - return &ctx{ - frozenConfig: b.frozenConfig, - prefix: b.prefix + " " + prefix, - encoders: b.encoders, - decoders: b.decoders, - } -} - -// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal -func (iter *Iterator) ReadVal(obj interface{}) { - depth := iter.depth - cacheKey := reflect2.RTypeOf(obj) - decoder := iter.cfg.getDecoderFromCache(cacheKey) - if decoder == nil { - typ := reflect2.TypeOf(obj) - if typ == nil || typ.Kind() != reflect.Ptr { - iter.ReportError("ReadVal", "can only unmarshal into pointer") - return - } - decoder = iter.cfg.DecoderOf(typ) - } - ptr := reflect2.PtrOf(obj) - if ptr == nil { - iter.ReportError("ReadVal", "can not read into nil pointer") - return - } - decoder.Decode(ptr, iter) - if iter.depth != depth { - iter.ReportError("ReadVal", "unexpected mismatched nesting") - return - } -} - -// WriteVal copy the go interface into underlying JSON, same as json.Marshal -func (stream *Stream) WriteVal(val interface{}) { - if nil == val { - stream.WriteNil() - return - } - cacheKey := reflect2.RTypeOf(val) - encoder := stream.cfg.getEncoderFromCache(cacheKey) - if encoder == nil { - typ := reflect2.TypeOf(val) - encoder = stream.cfg.EncoderOf(typ) - } - encoder.Encode(reflect2.PtrOf(val), stream) -} - -func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { - cacheKey := typ.RType() - decoder := cfg.getDecoderFromCache(cacheKey) - if decoder != nil { - return decoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - ptrType := typ.(*reflect2.UnsafePtrType) - decoder = decoderOfType(ctx, ptrType.Elem()) - cfg.addDecoderToCache(cacheKey, decoder) - return decoder -} - -func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfType(ctx, typ) - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - return decoder -} - -func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := ctx.decoders[typ] - if decoder != nil { - return decoder - } - placeholder := &placeholderDecoder{} - ctx.decoders[typ] = placeholder - decoder = _createDecoderOfType(ctx, typ) - placeholder.decoder = decoder - return decoder -} - -func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := createDecoderOfJsonRawMessage(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfJsonNumber(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfMarshaler(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfAny(ctx, typ) - if decoder != nil { - return decoder - } - decoder = createDecoderOfNative(ctx, typ) - if 
decoder != nil { - return decoder - } - switch typ.Kind() { - case reflect.Interface: - ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) - if isIFace { - return &ifaceDecoder{valType: ifaceType} - } - return &efaceDecoder{} - case reflect.Struct: - return decoderOfStruct(ctx, typ) - case reflect.Array: - return decoderOfArray(ctx, typ) - case reflect.Slice: - return decoderOfSlice(ctx, typ) - case reflect.Map: - return decoderOfMap(ctx, typ) - case reflect.Ptr: - return decoderOfOptional(ctx, typ) - default: - return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { - cacheKey := typ.RType() - encoder := cfg.getEncoderFromCache(cacheKey) - if encoder != nil { - return encoder - } - ctx := &ctx{ - frozenConfig: cfg, - prefix: "", - decoders: map[reflect2.Type]ValDecoder{}, - encoders: map[reflect2.Type]ValEncoder{}, - } - encoder = encoderOfType(ctx, typ) - if typ.LikePtr() { - encoder = &onePtrEncoder{encoder} - } - cfg.addEncoderToCache(cacheKey, encoder) - return encoder -} - -type onePtrEncoder struct { - encoder ValEncoder -} - -func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfType(ctx, typ) - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - return encoder -} - -func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoders[typ] - if encoder != nil { - return encoder - } - placeholder := &placeholderEncoder{} - ctx.encoders[typ] = placeholder - encoder = _createEncoderOfType(ctx, typ) - placeholder.encoder = encoder - return encoder -} -func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := createEncoderOfJsonRawMessage(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfJsonNumber(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfMarshaler(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfAny(ctx, typ) - if encoder != nil { - return encoder - } - encoder = createEncoderOfNative(ctx, typ) - if encoder != nil { - return encoder - } - kind := typ.Kind() - switch kind { - case reflect.Interface: - return &dynamicEncoder{typ} - case reflect.Struct: - return encoderOfStruct(ctx, typ) - case reflect.Array: - return encoderOfArray(ctx, typ) - case reflect.Slice: - return encoderOfSlice(ctx, typ) - case reflect.Map: - return encoderOfMap(ctx, typ) - case reflect.Ptr: - return encoderOfOptional(ctx, typ) - default: - return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} - } -} - -type lazyErrorDecoder struct { - err error -} - -func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() != NilValue { - if iter.Error == nil { - iter.Error = decoder.err - } - } else { - iter.Skip() - } -} - -type lazyErrorEncoder struct { - err error -} - -func (encoder 
*lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if ptr == nil { - stream.WriteNil() - } else if stream.Error == nil { - stream.Error = encoder.err - } -} - -func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type placeholderDecoder struct { - decoder ValDecoder -} - -func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(ptr, iter) -} - -type placeholderEncoder struct { - encoder ValEncoder -} - -func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(ptr, stream) -} - -func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(ptr) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_array.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_array.go deleted file mode 100644 index 13a0b7b0878..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_array.go +++ /dev/null @@ -1,104 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayDecoder{arrayType, decoder} -} - -func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { - arrayType := typ.(*reflect2.UnsafeArrayType) - if arrayType.Len() == 0 { - return emptyArrayEncoder{} - } - encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) - return &arrayEncoder{arrayType, encoder} -} - -type emptyArrayEncoder struct{} - -func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteEmptyArray() -} - -func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return true -} - -type arrayEncoder struct { - arrayType *reflect2.UnsafeArrayType - elemEncoder ValEncoder -} - -func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteArrayStart() - elemPtr := unsafe.Pointer(ptr) - encoder.elemEncoder.Encode(elemPtr, stream) - for i := 1; i < encoder.arrayType.Len(); i++ { - stream.WriteMore() - elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) - } -} - -func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type arrayDecoder struct { - arrayType *reflect2.UnsafeArrayType - elemDecoder ValDecoder -} - -func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) - } -} - -func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - arrayType := decoder.arrayType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - return - } - if c != '[' { - iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - return - } - iter.unreadByte() - elemPtr := arrayType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - if length >= arrayType.Len() { - iter.Skip() - continue - } - idx := length - length += 1 
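// Note: the branch above mirrors encoding/json semantics for fixed-length
// arrays: once the Go array is full, extra JSON elements are parsed and
// discarded via Skip. A sketch of the observable behavior (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var arr [2]int
	// Four JSON elements into a [2]int: the last two are skipped.
	_ = jsoniter.Unmarshal([]byte(`[1, 2, 3, 4]`), &arr)
	fmt.Println(arr) // [1 2]
}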
- elemPtr = arrayType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_dynamic.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_dynamic.go deleted file mode 100644 index 8b6bc8b4332..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_dynamic.go +++ /dev/null @@ -1,70 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "reflect" - "unsafe" -) - -type dynamicEncoder struct { - valType reflect2.Type -} - -func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - stream.WriteVal(obj) -} - -func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.valType.UnsafeIndirect(ptr) == nil -} - -type efaceDecoder struct { -} - -func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - pObj := (*interface{})(ptr) - obj := *pObj - if obj == nil { - *pObj = iter.Read() - return - } - typ := reflect2.TypeOf(obj) - if typ.Kind() != reflect.Ptr { - *pObj = iter.Read() - return - } - ptrType := typ.(*reflect2.UnsafePtrType) - ptrElemType := ptrType.Elem() - if iter.WhatIsNext() == NilValue { - if ptrElemType.Kind() != reflect.Ptr { - iter.skipFourBytes('n', 'u', 'l', 'l') - *pObj = nil - return - } - } - if reflect2.IsNil(obj) { - obj := ptrElemType.New() - iter.ReadVal(obj) - *pObj = obj - return - } - iter.ReadVal(obj) -} - -type ifaceDecoder struct { - valType *reflect2.UnsafeIFaceType -} - -func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) - return - } - obj := decoder.valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - iter.ReportError("decode non empty interface", "can not unmarshal into nil") - return - } - iter.ReadVal(obj) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_extension.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_extension.go deleted file mode 100644 index 74a97bfe5ab..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_extension.go +++ /dev/null @@ -1,483 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "reflect" - "sort" - "strings" - "unicode" - "unsafe" -) - -var typeDecoders = map[string]ValDecoder{} -var fieldDecoders = map[string]ValDecoder{} -var typeEncoders = map[string]ValEncoder{} -var fieldEncoders = map[string]ValEncoder{} -var extensions = []Extension{} - -// StructDescriptor describe how should we encode/decode the struct -type StructDescriptor struct { - Type reflect2.Type - Fields []*Binding -} - -// GetField get one field from the descriptor by its name. -// Can not use map here to keep field orders. -func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { - for _, binding := range structDescriptor.Fields { - if binding.Field.Name() == fieldName { - return binding - } - } - return nil -} - -// Binding describe how should we encode/decode the struct field -type Binding struct { - levels []int - Field reflect2.StructField - FromNames []string - ToNames []string - Encoder ValEncoder - Decoder ValDecoder -} - -// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. -// Can also rename fields by UpdateStructDescriptor. 
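// Note: a concrete extension typically embeds DummyExtension (defined below)
// and overrides only what it needs. A sketch that renames a field through
// UpdateStructDescriptor (illustrative, not part of this package):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type renameExtension struct {
	jsoniter.DummyExtension // no-op defaults for the rest of the interface
}

func (*renameExtension) UpdateStructDescriptor(desc *jsoniter.StructDescriptor) {
	if binding := desc.GetField("Name"); binding != nil {
		binding.ToNames = []string{"full_name"}   // used when encoding
		binding.FromNames = []string{"full_name"} // used when decoding
	}
}

type person struct{ Name string }

func main() {
	api := jsoniter.Config{}.Froze()
	api.RegisterExtension(&renameExtension{})
	out, _ := api.Marshal(person{Name: "jane"})
	fmt.Println(string(out)) // {"full_name":"jane"}
}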
-type Extension interface { - UpdateStructDescriptor(structDescriptor *StructDescriptor) - CreateMapKeyDecoder(typ reflect2.Type) ValDecoder - CreateMapKeyEncoder(typ reflect2.Type) ValEncoder - CreateDecoder(typ reflect2.Type) ValDecoder - CreateEncoder(typ reflect2.Type) ValEncoder - DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder - DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder -} - -// DummyExtension embed this type get dummy implementation for all methods of Extension -type DummyExtension struct { -} - -// UpdateStructDescriptor No-op -func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder No-op -func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder No-op -func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type EncoderExtension map[reflect2.Type]ValEncoder - -// UpdateStructDescriptor No-op -func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateDecoder No-op -func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateEncoder get encoder from map -func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return extension[typ] -} - -// CreateMapKeyDecoder No-op -func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder -} - -type DecoderExtension map[reflect2.Type]ValDecoder - -// UpdateStructDescriptor No-op -func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { -} - -// CreateMapKeyDecoder No-op -func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { - return nil -} - -// CreateMapKeyEncoder No-op -func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// CreateDecoder get decoder from map -func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { - return extension[typ] -} - -// CreateEncoder No-op -func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { - return nil -} - -// DecorateDecoder No-op -func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { - return decoder -} - -// DecorateEncoder No-op -func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { - return encoder 
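// Note: the funcDecoder/funcEncoder wrappers defined next are what the
// package-level RegisterType*Func helpers build on. A sketch registering a
// custom time.Time encoder (illustrative):

package main

import (
	"fmt"
	"time"
	"unsafe"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Encode time.Time as a Unix timestamp instead of an RFC 3339 string.
	jsoniter.RegisterTypeEncoderFunc("time.Time",
		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
			stream.WriteInt64((*time.Time)(ptr).Unix())
		},
		func(ptr unsafe.Pointer) bool {
			return (*time.Time)(ptr).IsZero()
		})

	out, _ := jsoniter.Marshal(time.Unix(42, 0))
	fmt.Println(string(out)) // 42
}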
-} - -type funcDecoder struct { - fun DecoderFunc -} - -func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.fun(ptr, iter) -} - -type funcEncoder struct { - fun EncoderFunc - isEmptyFunc func(ptr unsafe.Pointer) bool -} - -func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.fun(ptr, stream) -} - -func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { - if encoder.isEmptyFunc == nil { - return false - } - return encoder.isEmptyFunc(ptr) -} - -// DecoderFunc the function form of TypeDecoder -type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) - -// EncoderFunc the function form of TypeEncoder -type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) - -// RegisterTypeDecoderFunc register TypeDecoder for a type with function -func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { - typeDecoders[typ] = &funcDecoder{fun} -} - -// RegisterTypeDecoder register TypeDecoder for a typ -func RegisterTypeDecoder(typ string, decoder ValDecoder) { - typeDecoders[typ] = decoder -} - -// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function -func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { - RegisterFieldDecoder(typ, field, &funcDecoder{fun}) -} - -// RegisterFieldDecoder register TypeDecoder for a struct field -func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { - fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder -} - -// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function -func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} -} - -// RegisterTypeEncoder register TypeEncoder for a type -func RegisterTypeEncoder(typ string, encoder ValEncoder) { - typeEncoders[typ] = encoder -} - -// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function -func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { - RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) -} - -// RegisterFieldEncoder register TypeEncoder for a struct field -func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { - fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder -} - -// RegisterExtension register extension -func RegisterExtension(extension Extension) { - extensions = append(extensions, extension) -} - -func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := _getTypeDecoderFromExtension(ctx, typ) - if decoder != nil { - for _, extension := range extensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) - for _, extension := range ctx.extraExtensions { - decoder = extension.DecorateDecoder(typ, decoder) - } - } - return decoder -} -func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { - for _, extension := range extensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - decoder := ctx.decoderExtension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateDecoder(typ) - if decoder != nil { - return decoder - } - } - typeName := typ.String() - decoder = typeDecoders[typeName] - if decoder != nil { - return decoder - } - if typ.Kind() == reflect.Ptr { - ptrType := 
typ.(*reflect2.UnsafePtrType) - decoder := typeDecoders[ptrType.Elem().String()] - if decoder != nil { - return &OptionalDecoder{ptrType.Elem(), decoder} - } - } - return nil -} - -func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := _getTypeEncoderFromExtension(ctx, typ) - if encoder != nil { - for _, extension := range extensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) - for _, extension := range ctx.extraExtensions { - encoder = extension.DecorateEncoder(typ, encoder) - } - } - return encoder -} - -func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { - for _, extension := range extensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - encoder := ctx.encoderExtension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateEncoder(typ) - if encoder != nil { - return encoder - } - } - typeName := typ.String() - encoder = typeEncoders[typeName] - if encoder != nil { - return encoder - } - if typ.Kind() == reflect.Ptr { - typePtr := typ.(*reflect2.UnsafePtrType) - encoder := typeEncoders[typePtr.Elem().String()] - if encoder != nil { - return &OptionalEncoder{encoder} - } - } - return nil -} - -func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { - structType := typ.(*reflect2.UnsafeStructType) - embeddedBindings := []*Binding{} - bindings := []*Binding{} - for i := 0; i < structType.NumField(); i++ { - field := structType.Field(i) - tag, hastag := field.Tag().Lookup(ctx.getTagKey()) - if ctx.onlyTaggedField && !hastag && !field.Anonymous() { - continue - } - if tag == "-" || field.Name() == "_" { - continue - } - tagParts := strings.Split(tag, ",") - if field.Anonymous() && (tag == "" || tagParts[0] == "") { - if field.Type().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, field.Type()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) - omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } else if field.Type().Kind() == reflect.Ptr { - ptrType := field.Type().(*reflect2.UnsafePtrType) - if ptrType.Elem().Kind() == reflect.Struct { - structDescriptor := describeStruct(ctx, ptrType.Elem()) - for _, binding := range structDescriptor.Fields { - binding.levels = append([]int{i}, binding.levels...) 
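// Note: this recursion is what gives embedded (anonymous) struct fields the
// same flattened JSON layout as encoding/json. A sketch of the observable
// behavior (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type Base struct {
	ID int `json:"id"`
}

type Item struct {
	Base        // embedded: its fields are promoted into the outer object
	Name string `json:"name"`
}

func main() {
	out, _ := jsoniter.Marshal(Item{Base: Base{ID: 7}, Name: "widget"})
	fmt.Println(string(out)) // {"id":7,"name":"widget"}
}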
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty - binding.Encoder = &dereferenceEncoder{binding.Encoder} - binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} - binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} - binding.Decoder = &structFieldDecoder{field, binding.Decoder} - embeddedBindings = append(embeddedBindings, binding) - } - continue - } - } - } - fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) - fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) - decoder := fieldDecoders[fieldCacheKey] - if decoder == nil { - decoder = decoderOfType(ctx.append(field.Name()), field.Type()) - } - encoder := fieldEncoders[fieldCacheKey] - if encoder == nil { - encoder = encoderOfType(ctx.append(field.Name()), field.Type()) - } - binding := &Binding{ - Field: field, - FromNames: fieldNames, - ToNames: fieldNames, - Decoder: decoder, - Encoder: encoder, - } - binding.levels = []int{i} - bindings = append(bindings, binding) - } - return createStructDescriptor(ctx, typ, bindings, embeddedBindings) -} -func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { - structDescriptor := &StructDescriptor{ - Type: typ, - Fields: bindings, - } - for _, extension := range extensions { - extension.UpdateStructDescriptor(structDescriptor) - } - ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) - ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) - for _, extension := range ctx.extraExtensions { - extension.UpdateStructDescriptor(structDescriptor) - } - processTags(structDescriptor, ctx.frozenConfig) - // merge normal & embedded bindings & sort with original order - allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) - sort.Sort(allBindings) - structDescriptor.Fields = allBindings - return structDescriptor -} - -type sortableBindings []*Binding - -func (bindings sortableBindings) Len() int { - return len(bindings) -} - -func (bindings sortableBindings) Less(i, j int) bool { - left := bindings[i].levels - right := bindings[j].levels - k := 0 - for { - if left[k] < right[k] { - return true - } else if left[k] > right[k] { - return false - } - k++ - } -} - -func (bindings sortableBindings) Swap(i, j int) { - bindings[i], bindings[j] = bindings[j], bindings[i] -} - -func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { - for _, binding := range structDescriptor.Fields { - shouldOmitEmpty := false - tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") - for _, tagPart := range tagParts[1:] { - if tagPart == "omitempty" { - shouldOmitEmpty = true - } else if tagPart == "string" { - if binding.Field.Type().Kind() == reflect.String { - binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} - binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} - } else { - binding.Decoder = &stringModeNumberDecoder{binding.Decoder} - binding.Encoder = &stringModeNumberEncoder{binding.Encoder} - } - } - } - binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} - binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} - } -} - -func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { - // ignore? - if wholeTag == "-" { - return []string{} - } - // rename? 
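// Note: together with processTags above, this implements the usual
// encoding/json tag grammar: "-" drops a field, a leading name renames it,
// "omitempty" suppresses zero values, and "string" quotes numeric values.
// A sketch (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type account struct {
	Secret string  `json:"-"`             // never serialized
	Name   string  `json:"name"`          // renamed
	Age    int     `json:"age,omitempty"` // omitted when zero
	Ratio  float64 `json:"ratio,string"`  // encoded as a JSON string
}

func main() {
	out, _ := jsoniter.Marshal(account{Secret: "x", Name: "jane", Ratio: 0.5})
	fmt.Println(string(out)) // {"name":"jane","ratio":"0.5"}
}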
- var fieldNames []string - if tagProvidedFieldName == "" { - fieldNames = []string{originalFieldName} - } else { - fieldNames = []string{tagProvidedFieldName} - } - // private? - isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_' - if isNotExported { - fieldNames = []string{} - } - return fieldNames -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_number.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_number.go deleted file mode 100644 index 98d45c1ec25..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_number.go +++ /dev/null @@ -1,112 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "strconv" - "unsafe" -) - -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -func CastJsonNumber(val interface{}) (string, bool) { - switch typedVal := val.(type) { - case json.Number: - return string(typedVal), true - case Number: - return string(typedVal), true - } - return "", false -} - -var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() -var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() - -func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.AssignableTo(jsonNumberType) { - return &jsonNumberCodec{} - } - if typ.AssignableTo(jsoniterNumberType) { - return &jsoniterNumberCodec{} - } - return nil -} - -type jsonNumberCodec struct { -} - -func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*json.Number)(ptr)) = json.Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*json.Number)(ptr)) = "" - default: - *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*json.Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.Number)(ptr))) == 0 -} - -type jsoniterNumberCodec struct { -} - -func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - switch iter.WhatIsNext() { - case StringValue: - *((*Number)(ptr)) = Number(iter.ReadString()) - case NilValue: - iter.skipFourBytes('n', 'u', 'l', 'l') - *((*Number)(ptr)) = "" - default: - *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) - } -} - -func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - number := *((*Number)(ptr)) - if len(number) == 0 { - stream.writeByte('0') - } else { - stream.WriteRaw(string(number)) - } -} - -func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*Number)(ptr))) == 0 -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
b/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_raw_message.go deleted file mode 100644 index eba434f2f16..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_json_raw_message.go +++ /dev/null @@ -1,76 +0,0 @@ -package jsoniter - -import ( - "encoding/json" - "github.com/modern-go/reflect2" - "unsafe" -) - -var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() -var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() - -func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ == jsonRawMessageType { - return &jsonRawMessageCodec{} - } - if typ == jsoniterRawMessageType { - return &jsoniterRawMessageCodec{} - } - return nil -} - -type jsonRawMessageCodec struct { -} - -func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*json.RawMessage)(ptr)) = nil - } else { - *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes() - } -} - -func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*json.RawMessage)(ptr)) == nil { - stream.WriteNil() - } else { - stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) - } -} - -func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*json.RawMessage)(ptr))) == 0 -} - -type jsoniterRawMessageCodec struct { -} - -func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*RawMessage)(ptr)) = nil - } else { - *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes() - } -} - -func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*RawMessage)(ptr)) == nil { - stream.WriteNil() - } else { - stream.WriteRaw(string(*((*RawMessage)(ptr)))) - } -} - -func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*RawMessage)(ptr))) == 0 -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_map.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_map.go deleted file mode 100644 index 58296713013..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_map.go +++ /dev/null @@ -1,346 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "sort" - "unsafe" -) - -func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { - mapType := typ.(*reflect2.UnsafeMapType) - keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) - elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) - return &mapDecoder{ - mapType: mapType, - keyType: mapType.Key(), - elemType: mapType.Elem(), - keyDecoder: keyDecoder, - elemDecoder: elemDecoder, - } -} - -func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { - mapType := typ.(*reflect2.UnsafeMapType) - if ctx.sortMapKeys { - return &sortKeysMapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } - } - return &mapEncoder{ - mapType: mapType, - keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), - elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), - } -} - -func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { - decoder := 
ctx.decoderExtension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - for _, extension := range ctx.extraExtensions { - decoder := extension.CreateMapKeyDecoder(typ) - if decoder != nil { - return decoder - } - } - - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(unmarshalerType) { - return &unmarshalerDecoder{ - valType: typ, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ - valType: ptrType, - }, - } - } - if typ.Implements(textUnmarshalerType) { - return &textUnmarshalerDecoder{ - valType: typ, - } - } - - switch typ.Kind() { - case reflect.String: - return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyDecoder{decoderOfType(ctx, typ)} - default: - return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { - encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - for _, extension := range ctx.extraExtensions { - encoder := extension.CreateMapKeyEncoder(typ) - if encoder != nil { - return encoder - } - } - - if typ == textMarshalerType { - return &directTextMarshalerEncoder{ - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - if typ.Implements(textMarshalerType) { - return &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - } - - switch typ.Kind() { - case reflect.String: - return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) - case reflect.Bool, - reflect.Uint8, reflect.Int8, - reflect.Uint16, reflect.Int16, - reflect.Uint32, reflect.Int32, - reflect.Uint64, reflect.Int64, - reflect.Uint, reflect.Int, - reflect.Float32, reflect.Float64, - reflect.Uintptr: - typ = reflect2.DefaultTypeOfKind(typ.Kind()) - return &numericMapKeyEncoder{encoderOfType(ctx, typ)} - default: - if typ.Kind() == reflect.Interface { - return &dynamicMapKeyEncoder{ctx, typ} - } - return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} - } -} - -type mapDecoder struct { - mapType *reflect2.UnsafeMapType - keyType reflect2.Type - elemType reflect2.Type - keyDecoder ValDecoder - elemDecoder ValDecoder -} - -func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - mapType := decoder.mapType - c := iter.nextToken() - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - *(*unsafe.Pointer)(ptr) = nil - mapType.UnsafeSet(ptr, mapType.UnsafeNew()) - return - } - if mapType.UnsafeIsNil(ptr) { - mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) - } - if c != '{' { - iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) - return - } - c = iter.nextToken() - if c == '}' { - return - } - iter.unreadByte() - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - 
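// Note: the key/value loop here, combined with numericMapKeyDecoder below,
// lets non-string Go map keys round-trip through their quoted JSON form,
// matching encoding/json. A sketch (illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// JSON object keys are always strings; numeric Go keys are read from
	// their quoted form.
	var m map[int]string
	_ = jsoniter.Unmarshal([]byte(`{"1":"one","2":"two"}`), &m)
	fmt.Println(m[1], m[2]) // one two
}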
decoder.mapType.UnsafeSetIndex(ptr, key, elem) - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - key := decoder.keyType.UnsafeNew() - decoder.keyDecoder.Decode(key, iter) - c = iter.nextToken() - if c != ':' { - iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) - return - } - elem := decoder.elemType.UnsafeNew() - decoder.elemDecoder.Decode(elem, iter) - decoder.mapType.UnsafeSetIndex(ptr, key, elem) - } - if c != '}' { - iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) - } -} - -type numericMapKeyDecoder struct { - decoder ValDecoder -} - -func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } - decoder.decoder.Decode(ptr, iter) - c = iter.nextToken() - if c != '"' { - iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) - return - } -} - -type numericMapKeyEncoder struct { - encoder ValEncoder -} - -func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.writeByte('"') - encoder.encoder.Encode(ptr, stream) - stream.writeByte('"') -} - -func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return false -} - -type dynamicMapKeyEncoder struct { - ctx *ctx - valType reflect2.Type -} - -func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) -} - -func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { - obj := encoder.valType.UnsafeIndirect(ptr) - return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) -} - -type mapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - iter := encoder.mapType.UnsafeIterate(ptr) - for i := 0; iter.HasNext(); i++ { - if i != 0 { - stream.WriteMore() - } - key, elem := iter.UnsafeNext() - encoder.keyEncoder.Encode(key, stream) - if stream.indention > 0 { - stream.writeTwoBytes(byte(':'), byte(' ')) - } else { - stream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, stream) - } - stream.WriteObjectEnd() -} - -func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type sortKeysMapEncoder struct { - mapType *reflect2.UnsafeMapType - keyEncoder ValEncoder - elemEncoder ValEncoder -} - -func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *(*unsafe.Pointer)(ptr) == nil { - stream.WriteNil() - return - } - stream.WriteObjectStart() - mapIter := encoder.mapType.UnsafeIterate(ptr) - subStream := stream.cfg.BorrowStream(nil) - subStream.Attachment = stream.Attachment - subIter := stream.cfg.BorrowIterator(nil) - keyValues := encodedKeyValues{} - for mapIter.HasNext() { - key, elem := mapIter.UnsafeNext() - subStreamIndex := subStream.Buffered() - encoder.keyEncoder.Encode(key, subStream) - if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { - stream.Error = subStream.Error - } - encodedKey := subStream.Buffer()[subStreamIndex:] - subIter.ResetBytes(encodedKey) - decodedKey := subIter.ReadString() - if 
stream.indention > 0 { - subStream.writeTwoBytes(byte(':'), byte(' ')) - } else { - subStream.writeByte(':') - } - encoder.elemEncoder.Encode(elem, subStream) - keyValues = append(keyValues, encodedKV{ - key: decodedKey, - keyValue: subStream.Buffer()[subStreamIndex:], - }) - } - sort.Sort(keyValues) - for i, keyValue := range keyValues { - if i != 0 { - stream.WriteMore() - } - stream.Write(keyValue.keyValue) - } - if subStream.Error != nil && stream.Error == nil { - stream.Error = subStream.Error - } - stream.WriteObjectEnd() - stream.cfg.ReturnStream(subStream) - stream.cfg.ReturnIterator(subIter) -} - -func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { - iter := encoder.mapType.UnsafeIterate(ptr) - return !iter.HasNext() -} - -type encodedKeyValues []encodedKV - -type encodedKV struct { - key string - keyValue []byte -} - -func (sv encodedKeyValues) Len() int { return len(sv) } -func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_marshaler.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_marshaler.go deleted file mode 100644 index 3e21f375671..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ /dev/null @@ -1,225 +0,0 @@ -package jsoniter - -import ( - "encoding" - "encoding/json" - "unsafe" - - "github.com/modern-go/reflect2" -) - -var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() -var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() -var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() -var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() - -func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := reflect2.PtrTo(typ) - if ptrType.Implements(unmarshalerType) { - return &referenceDecoder{ - &unmarshalerDecoder{ptrType}, - } - } - if ptrType.Implements(textUnmarshalerType) { - return &referenceDecoder{ - &textUnmarshalerDecoder{ptrType}, - } - } - return nil -} - -func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ == marshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - } - return encoder - } - if typ.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &marshalerEncoder{ - valType: typ, - checkIsEmpty: checkIsEmpty, - } - return encoder - } - ptrType := reflect2.PtrTo(typ) - if ctx.prefix != "" && ptrType.Implements(marshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &marshalerEncoder{ - valType: ptrType, - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - if typ == textMarshalerType { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &directTextMarshalerEncoder{ - checkIsEmpty: checkIsEmpty, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - } - return encoder - } - if typ.Implements(textMarshalerType) { - checkIsEmpty := createCheckIsEmpty(ctx, typ) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: typ, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return encoder - } - // if prefix is empty, the type is the root type - if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { - checkIsEmpty := 
createCheckIsEmpty(ctx, ptrType) - var encoder ValEncoder = &textMarshalerEncoder{ - valType: ptrType, - stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), - checkIsEmpty: checkIsEmpty, - } - return &referenceEncoder{encoder} - } - return nil -} - -type marshalerEncoder struct { - checkIsEmpty checkIsEmpty - valType reflect2.Type -} - -func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := obj.(json.Marshaler) - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - // html escape was already done by jsoniter - // but the extra '\n' should be trimed - l := len(bytes) - if l > 0 && bytes[l-1] == '\n' { - bytes = bytes[:l-1] - } - stream.Write(bytes) - } -} - -func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directMarshalerEncoder struct { - checkIsEmpty checkIsEmpty -} - -func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*json.Marshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalJSON() - if err != nil { - stream.Error = err - } else { - stream.Write(bytes) - } -} - -func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type textMarshalerEncoder struct { - valType reflect2.Type - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - obj := encoder.valType.UnsafeIndirect(ptr) - if encoder.valType.IsNullable() && reflect2.IsNil(obj) { - stream.WriteNil() - return - } - marshaler := (obj).(encoding.TextMarshaler) - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type directTextMarshalerEncoder struct { - stringEncoder ValEncoder - checkIsEmpty checkIsEmpty -} - -func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - marshaler := *(*encoding.TextMarshaler)(ptr) - if marshaler == nil { - stream.WriteNil() - return - } - bytes, err := marshaler.MarshalText() - if err != nil { - stream.Error = err - } else { - str := string(bytes) - encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) - } -} - -func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.checkIsEmpty.IsEmpty(ptr) -} - -type unmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - unmarshaler := obj.(json.Unmarshaler) - iter.nextToken() - iter.unreadByte() // skip spaces - bytes := iter.SkipAndReturnBytes() - err := unmarshaler.UnmarshalJSON(bytes) - if err != nil { - iter.ReportError("unmarshalerDecoder", err.Error()) - } -} - -type textUnmarshalerDecoder struct { - valType reflect2.Type -} - -func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valType := decoder.valType - obj := valType.UnsafeIndirect(ptr) - if reflect2.IsNil(obj) { - ptrType := valType.(*reflect2.UnsafePtrType) - elemType := 
ptrType.Elem() - elem := elemType.UnsafeNew() - ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) - obj = valType.UnsafeIndirect(ptr) - } - unmarshaler := (obj).(encoding.TextUnmarshaler) - str := iter.ReadString() - err := unmarshaler.UnmarshalText([]byte(str)) - if err != nil { - iter.ReportError("textUnmarshalerDecoder", err.Error()) - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_native.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_native.go deleted file mode 100644 index f88722d14d1..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_native.go +++ /dev/null @@ -1,453 +0,0 @@ -package jsoniter - -import ( - "encoding/base64" - "reflect" - "strconv" - "unsafe" - - "github.com/modern-go/reflect2" -) - -const ptrSize = 32 << uintptr(^uintptr(0)>>63) - -func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - kind := typ.Kind() - switch kind { - case reflect.String: - if typeName != "string" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -func 
createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { - if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { - sliceDecoder := decoderOfSlice(ctx, typ) - return &base64Codec{sliceDecoder: sliceDecoder} - } - typeName := typ.String() - switch typ.Kind() { - case reflect.String: - if typeName != "string" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) - } - return &stringCodec{} - case reflect.Int: - if typeName != "int" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &int32Codec{} - } - return &int64Codec{} - case reflect.Int8: - if typeName != "int8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) - } - return &int8Codec{} - case reflect.Int16: - if typeName != "int16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) - } - return &int16Codec{} - case reflect.Int32: - if typeName != "int32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) - } - return &int32Codec{} - case reflect.Int64: - if typeName != "int64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) - } - return &int64Codec{} - case reflect.Uint: - if typeName != "uint" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) - } - if strconv.IntSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint8: - if typeName != "uint8" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) - } - return &uint8Codec{} - case reflect.Uint16: - if typeName != "uint16" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) - } - return &uint16Codec{} - case reflect.Uint32: - if typeName != "uint32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) - } - return &uint32Codec{} - case reflect.Uintptr: - if typeName != "uintptr" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) - } - if ptrSize == 32 { - return &uint32Codec{} - } - return &uint64Codec{} - case reflect.Uint64: - if typeName != "uint64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) - } - return &uint64Codec{} - case reflect.Float32: - if typeName != "float32" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) - } - return &float32Codec{} - case reflect.Float64: - if typeName != "float64" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) - } - return &float64Codec{} - case reflect.Bool: - if typeName != "bool" { - return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) - } - return &boolCodec{} - } - return nil -} - -type stringCodec struct { -} - -func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - *((*string)(ptr)) = iter.ReadString() -} - -func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - str := *((*string)(ptr)) - stream.WriteString(str) -} - -func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*string)(ptr)) == "" -} - -type int8Codec struct { -} - -func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int8)(ptr)) = iter.ReadInt8() - } -} - -func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt8(*((*int8)(ptr))) -} - -func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int8)(ptr)) == 0 -} - -type int16Codec struct { -} - -func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) 
{ - if !iter.ReadNil() { - *((*int16)(ptr)) = iter.ReadInt16() - } -} - -func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt16(*((*int16)(ptr))) -} - -func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int16)(ptr)) == 0 -} - -type int32Codec struct { -} - -func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int32)(ptr)) = iter.ReadInt32() - } -} - -func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt32(*((*int32)(ptr))) -} - -func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int32)(ptr)) == 0 -} - -type int64Codec struct { -} - -func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*int64)(ptr)) = iter.ReadInt64() - } -} - -func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteInt64(*((*int64)(ptr))) -} - -func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*int64)(ptr)) == 0 -} - -type uint8Codec struct { -} - -func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint8)(ptr)) = iter.ReadUint8() - } -} - -func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint8(*((*uint8)(ptr))) -} - -func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint8)(ptr)) == 0 -} - -type uint16Codec struct { -} - -func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint16)(ptr)) = iter.ReadUint16() - } -} - -func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint16(*((*uint16)(ptr))) -} - -func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint16)(ptr)) == 0 -} - -type uint32Codec struct { -} - -func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint32)(ptr)) = iter.ReadUint32() - } -} - -func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint32(*((*uint32)(ptr))) -} - -func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint32)(ptr)) == 0 -} - -type uint64Codec struct { -} - -func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*uint64)(ptr)) = iter.ReadUint64() - } -} - -func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteUint64(*((*uint64)(ptr))) -} - -func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*uint64)(ptr)) == 0 -} - -type float32Codec struct { -} - -func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float32)(ptr)) = iter.ReadFloat32() - } -} - -func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat32(*((*float32)(ptr))) -} - -func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float32)(ptr)) == 0 -} - -type float64Codec struct { -} - -func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*float64)(ptr)) = iter.ReadFloat64() - } -} - -func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteFloat64(*((*float64)(ptr))) -} - -func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return *((*float64)(ptr)) == 0 -} - -type boolCodec struct { -} - -func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.ReadNil() { - *((*bool)(ptr)) = 
iter.ReadBool() - } -} - -func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { - stream.WriteBool(*((*bool)(ptr))) -} - -func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { - return !(*((*bool)(ptr))) -} - -type base64Codec struct { - sliceType *reflect2.UnsafeSliceType - sliceDecoder ValDecoder -} - -func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - codec.sliceType.UnsafeSetNil(ptr) - return - } - switch iter.WhatIsNext() { - case StringValue: - src := iter.ReadString() - dst, err := base64.StdEncoding.DecodeString(src) - if err != nil { - iter.ReportError("decode base64", err.Error()) - } else { - codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) - } - case ArrayValue: - codec.sliceDecoder.Decode(ptr, iter) - default: - iter.ReportError("base64Codec", "invalid input") - } -} - -func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - if codec.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - src := *((*[]byte)(ptr)) - encoding := base64.StdEncoding - stream.writeByte('"') - if len(src) != 0 { - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) - } - stream.writeByte('"') -} - -func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { - return len(*((*[]byte)(ptr))) == 0 -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_optional.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_optional.go deleted file mode 100644 index fa71f474891..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_optional.go +++ /dev/null @@ -1,129 +0,0 @@ -package jsoniter - -import ( - "github.com/modern-go/reflect2" - "unsafe" -) - -func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - decoder := decoderOfType(ctx, elemType) - return &OptionalDecoder{elemType, decoder} -} - -func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { - ptrType := typ.(*reflect2.UnsafePtrType) - elemType := ptrType.Elem() - elemEncoder := encoderOfType(ctx, elemType) - encoder := &OptionalEncoder{elemEncoder} - return encoder -} - -type OptionalDecoder struct { - ValueType reflect2.Type - ValueDecoder ValDecoder -} - -func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.ReadNil() { - *((*unsafe.Pointer)(ptr)) = nil - } else { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.ValueType.UnsafeNew() - decoder.ValueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } - } -} - -type dereferenceDecoder struct { - // only to deference a pointer - valueType reflect2.Type - valueDecoder ValDecoder -} - -func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if *((*unsafe.Pointer)(ptr)) == nil { - //pointer to null, we have to allocate memory to hold the value - newPtr := decoder.valueType.UnsafeNew() - decoder.valueDecoder.Decode(newPtr, iter) - *((*unsafe.Pointer)(ptr)) = newPtr - } else { - //reuse existing instance - decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) - } -} - -type OptionalEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - 
stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return *((*unsafe.Pointer)(ptr)) == nil -} - -type dereferenceEncoder struct { - ValueEncoder ValEncoder -} - -func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if *((*unsafe.Pointer)(ptr)) == nil { - stream.WriteNil() - } else { - encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) - } -} - -func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - dePtr := *((*unsafe.Pointer)(ptr)) - if dePtr == nil { - return true - } - return encoder.ValueEncoder.IsEmpty(dePtr) -} - -func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { - deReferenced := *((*unsafe.Pointer)(ptr)) - if deReferenced == nil { - return true - } - isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) - if !converted { - return false - } - fieldPtr := unsafe.Pointer(deReferenced) - return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) -} - -type referenceEncoder struct { - encoder ValEncoder -} - -func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) -} - -func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) -} - -type referenceDecoder struct { - decoder ValDecoder -} - -func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_slice.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_slice.go deleted file mode 100644 index 9441d79df33..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_slice.go +++ /dev/null @@ -1,99 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "unsafe" -) - -func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceDecoder{sliceType, decoder} -} - -func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { - sliceType := typ.(*reflect2.UnsafeSliceType) - encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) - return &sliceEncoder{sliceType, encoder} -} - -type sliceEncoder struct { - sliceType *reflect2.UnsafeSliceType - elemEncoder ValEncoder -} - -func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { - if encoder.sliceType.UnsafeIsNil(ptr) { - stream.WriteNil() - return - } - length := encoder.sliceType.UnsafeLengthOf(ptr) - if length == 0 { - stream.WriteEmptyArray() - return - } - stream.WriteArrayStart() - encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) - for i := 1; i < length; i++ { - stream.WriteMore() - elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) - encoder.elemEncoder.Encode(elemPtr, stream) - } - stream.WriteArrayEnd() - if stream.Error != nil && stream.Error != io.EOF { - stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) - } -} - -func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { - return encoder.sliceType.UnsafeLengthOf(ptr) == 0 -} - -type sliceDecoder struct { - sliceType *reflect2.UnsafeSliceType - elemDecoder ValDecoder -} - -func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.doDecode(ptr, iter) 
- if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) - } -} - -func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { - c := iter.nextToken() - sliceType := decoder.sliceType - if c == 'n' { - iter.skipThreeBytes('u', 'l', 'l') - sliceType.UnsafeSetNil(ptr) - return - } - if c != '[' { - iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) - return - } - c = iter.nextToken() - if c == ']' { - sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) - return - } - iter.unreadByte() - sliceType.UnsafeGrow(ptr, 1) - elemPtr := sliceType.UnsafeGetIndex(ptr, 0) - decoder.elemDecoder.Decode(elemPtr, iter) - length := 1 - for c = iter.nextToken(); c == ','; c = iter.nextToken() { - idx := length - length += 1 - sliceType.UnsafeGrow(ptr, length) - elemPtr = sliceType.UnsafeGetIndex(ptr, idx) - decoder.elemDecoder.Decode(elemPtr, iter) - } - if c != ']' { - iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) - return - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_decoder.go deleted file mode 100644 index 92ae912dc24..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ /dev/null @@ -1,1097 +0,0 @@ -package jsoniter - -import ( - "fmt" - "io" - "strings" - "unsafe" - - "github.com/modern-go/reflect2" -) - -func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { - bindings := map[string]*Binding{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, fromName := range binding.FromNames { - old := bindings[fromName] - if old == nil { - bindings[fromName] = binding - continue - } - ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) - if ignoreOld { - delete(bindings, fromName) - } - if !ignoreNew { - bindings[fromName] = binding - } - } - } - fields := map[string]*structFieldDecoder{} - for k, binding := range bindings { - fields[k] = binding.Decoder.(*structFieldDecoder) - } - - if !ctx.caseSensitive() { - for k, binding := range bindings { - if _, found := fields[strings.ToLower(k)]; !found { - fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) - } - } - } - - return createStructDecoder(ctx, typ, fields) -} - -func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { - if ctx.disallowUnknownFields { - return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} - } - knownHash := map[int64]struct{}{ - 0: {}, - } - - switch len(fields) { - case 0: - return &skipObjectDecoder{typ} - case 1: - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} - } - case 2: - var fieldHash1 int64 - var fieldHash2 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldHash1 == 0 { - fieldHash1 = fieldHash - fieldDecoder1 = fieldDecoder - } else 
{ - fieldHash2 = fieldHash - fieldDecoder2 = fieldDecoder - } - } - return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} - case 3: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } - } - return &threeFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3} - case 4: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } - } - return &fourFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4} - case 5: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } - } - return &fiveFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5} - case 6: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - 
fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } - } - return &sixFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6} - case 7: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } - } - return &sevenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7} - case 8: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - 
fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } - } - return &eightFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8} - case 9: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash - fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } - } - return &nineFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9} - case 10: - var fieldName1 int64 - var fieldName2 int64 - var fieldName3 int64 - var fieldName4 int64 - var fieldName5 int64 - var fieldName6 int64 - var fieldName7 int64 - var fieldName8 int64 - var fieldName9 int64 - var fieldName10 int64 - var fieldDecoder1 *structFieldDecoder - var fieldDecoder2 *structFieldDecoder - var fieldDecoder3 *structFieldDecoder - var fieldDecoder4 *structFieldDecoder - var fieldDecoder5 *structFieldDecoder - var fieldDecoder6 *structFieldDecoder - var fieldDecoder7 *structFieldDecoder - var fieldDecoder8 *structFieldDecoder - var fieldDecoder9 *structFieldDecoder - var fieldDecoder10 *structFieldDecoder - for fieldName, fieldDecoder := range fields { - fieldHash := calcHash(fieldName, ctx.caseSensitive()) - _, known := knownHash[fieldHash] - if known { - return &generalStructDecoder{typ, fields, false} - } - knownHash[fieldHash] = struct{}{} - if fieldName1 == 0 { - fieldName1 = fieldHash - fieldDecoder1 = fieldDecoder - } else if fieldName2 == 0 { - fieldName2 = fieldHash - fieldDecoder2 = fieldDecoder - } else if fieldName3 == 0 { - fieldName3 = fieldHash 
- fieldDecoder3 = fieldDecoder - } else if fieldName4 == 0 { - fieldName4 = fieldHash - fieldDecoder4 = fieldDecoder - } else if fieldName5 == 0 { - fieldName5 = fieldHash - fieldDecoder5 = fieldDecoder - } else if fieldName6 == 0 { - fieldName6 = fieldHash - fieldDecoder6 = fieldDecoder - } else if fieldName7 == 0 { - fieldName7 = fieldHash - fieldDecoder7 = fieldDecoder - } else if fieldName8 == 0 { - fieldName8 = fieldHash - fieldDecoder8 = fieldDecoder - } else if fieldName9 == 0 { - fieldName9 = fieldHash - fieldDecoder9 = fieldDecoder - } else { - fieldName10 = fieldHash - fieldDecoder10 = fieldDecoder - } - } - return &tenFieldsStructDecoder{typ, - fieldName1, fieldDecoder1, - fieldName2, fieldDecoder2, - fieldName3, fieldDecoder3, - fieldName4, fieldDecoder4, - fieldName5, fieldDecoder5, - fieldName6, fieldDecoder6, - fieldName7, fieldDecoder7, - fieldName8, fieldDecoder8, - fieldName9, fieldDecoder9, - fieldName10, fieldDecoder10} - } - return &generalStructDecoder{typ, fields, false} -} - -type generalStructDecoder struct { - typ reflect2.Type - fields map[string]*structFieldDecoder - disallowUnknownFields bool -} - -func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - var c byte - for c = ','; c == ','; c = iter.nextToken() { - decoder.decodeOneField(ptr, iter) - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - if c != '}' { - iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) - } - iter.decrementDepth() -} - -func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { - var field string - var fieldDecoder *structFieldDecoder - if iter.cfg.objectFieldMustBeSimpleString { - fieldBytes := iter.ReadStringAsSlice() - field = *(*string)(unsafe.Pointer(&fieldBytes)) - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } else { - field = iter.ReadString() - fieldDecoder = decoder.fields[field] - if fieldDecoder == nil && !iter.cfg.caseSensitive { - fieldDecoder = decoder.fields[strings.ToLower(field)] - } - } - if fieldDecoder == nil { - if decoder.disallowUnknownFields { - msg := "found unknown field: " + field - iter.ReportError("ReadObject", msg) - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - iter.Skip() - return - } - c := iter.nextToken() - if c != ':' { - iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) - } - fieldDecoder.Decode(ptr, iter) -} - -type skipObjectDecoder struct { - typ reflect2.Type -} - -func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - valueType := iter.WhatIsNext() - if valueType != ObjectValue && valueType != NilValue { - iter.ReportError("skipObjectDecoder", "expect object or null") - return - } - iter.Skip() -} - -type oneFieldStructDecoder struct { - typ reflect2.Type - fieldHash int64 - fieldDecoder *structFieldDecoder -} - -func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - if iter.readFieldHash() == decoder.fieldHash { - decoder.fieldDecoder.Decode(ptr, iter) - } else { - 
iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type twoFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder -} - -func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type threeFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder -} - -func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type fourFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder -} - -func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type fiveFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder -} - -func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case 
decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type sixFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder -} - -func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type sevenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder -} - -func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type eightFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 
*structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder -} - -func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type nineFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder -} - -func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type tenFieldsStructDecoder struct { - typ reflect2.Type - fieldHash1 int64 - fieldDecoder1 *structFieldDecoder - fieldHash2 int64 - fieldDecoder2 *structFieldDecoder - fieldHash3 int64 - fieldDecoder3 *structFieldDecoder - fieldHash4 int64 - fieldDecoder4 *structFieldDecoder - fieldHash5 int64 - fieldDecoder5 *structFieldDecoder - fieldHash6 int64 - fieldDecoder6 *structFieldDecoder - fieldHash7 int64 - fieldDecoder7 *structFieldDecoder - fieldHash8 int64 - fieldDecoder8 *structFieldDecoder - fieldHash9 int64 - fieldDecoder9 *structFieldDecoder - fieldHash10 int64 - fieldDecoder10 *structFieldDecoder -} - -func 
(decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if !iter.readObjectStart() { - return - } - if !iter.incrementDepth() { - return - } - for { - switch iter.readFieldHash() { - case decoder.fieldHash1: - decoder.fieldDecoder1.Decode(ptr, iter) - case decoder.fieldHash2: - decoder.fieldDecoder2.Decode(ptr, iter) - case decoder.fieldHash3: - decoder.fieldDecoder3.Decode(ptr, iter) - case decoder.fieldHash4: - decoder.fieldDecoder4.Decode(ptr, iter) - case decoder.fieldHash5: - decoder.fieldDecoder5.Decode(ptr, iter) - case decoder.fieldHash6: - decoder.fieldDecoder6.Decode(ptr, iter) - case decoder.fieldHash7: - decoder.fieldDecoder7.Decode(ptr, iter) - case decoder.fieldHash8: - decoder.fieldDecoder8.Decode(ptr, iter) - case decoder.fieldHash9: - decoder.fieldDecoder9.Decode(ptr, iter) - case decoder.fieldHash10: - decoder.fieldDecoder10.Decode(ptr, iter) - default: - iter.Skip() - } - if iter.isObjectEnd() { - break - } - } - if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 { - iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) - } - iter.decrementDepth() -} - -type structFieldDecoder struct { - field reflect2.StructField - fieldDecoder ValDecoder -} - -func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - fieldPtr := decoder.field.UnsafeGet(ptr) - decoder.fieldDecoder.Decode(fieldPtr, iter) - if iter.Error != nil && iter.Error != io.EOF { - iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) - } -} - -type stringModeStringDecoder struct { - elemDecoder ValDecoder - cfg *frozenConfig -} - -func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - decoder.elemDecoder.Decode(ptr, iter) - str := *((*string)(ptr)) - tempIter := decoder.cfg.BorrowIterator([]byte(str)) - defer decoder.cfg.ReturnIterator(tempIter) - *((*string)(ptr)) = tempIter.ReadString() -} - -type stringModeNumberDecoder struct { - elemDecoder ValDecoder -} - -func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { - if iter.WhatIsNext() == NilValue { - decoder.elemDecoder.Decode(ptr, iter) - return - } - - c := iter.nextToken() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } - decoder.elemDecoder.Decode(ptr, iter) - if iter.Error != nil { - return - } - c = iter.readByte() - if c != '"' { - iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) - return - } -} diff --git a/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_encoder.go deleted file mode 100644 index 152e3ef5a93..00000000000 --- a/ui/wasm/vendor/github.com/json-iterator/go/reflect_struct_encoder.go +++ /dev/null @@ -1,211 +0,0 @@ -package jsoniter - -import ( - "fmt" - "github.com/modern-go/reflect2" - "io" - "reflect" - "unsafe" -) - -func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { - type bindingTo struct { - binding *Binding - toName string - ignored bool - } - orderedBindings := []*bindingTo{} - structDescriptor := describeStruct(ctx, typ) - for _, binding := range structDescriptor.Fields { - for _, toName := range binding.ToNames { - new := &bindingTo{ - binding: binding, - toName: toName, - } - for _, old := range orderedBindings { - if old.toName != toName { - continue - } - old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) - } - 
-		}
-	}
-	if len(orderedBindings) == 0 {
-		return &emptyStructEncoder{}
-	}
-	finalOrderedFields := []structFieldTo{}
-	for _, bindingTo := range orderedBindings {
-		if !bindingTo.ignored {
-			finalOrderedFields = append(finalOrderedFields, structFieldTo{
-				encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
-				toName: bindingTo.toName,
-			})
-		}
-	}
-	return &structEncoder{typ, finalOrderedFields}
-}
-
-func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
-	encoder := createEncoderOfNative(ctx, typ)
-	if encoder != nil {
-		return encoder
-	}
-	kind := typ.Kind()
-	switch kind {
-	case reflect.Interface:
-		return &dynamicEncoder{typ}
-	case reflect.Struct:
-		return &structEncoder{typ: typ}
-	case reflect.Array:
-		return &arrayEncoder{}
-	case reflect.Slice:
-		return &sliceEncoder{}
-	case reflect.Map:
-		return encoderOfMap(ctx, typ)
-	case reflect.Ptr:
-		return &OptionalEncoder{}
-	default:
-		return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
-	}
-}
-
-func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
-	newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
-	oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
-	if newTagged {
-		if oldTagged {
-			if len(old.levels) > len(new.levels) {
-				return true, false
-			} else if len(new.levels) > len(old.levels) {
-				return false, true
-			} else {
-				return true, true
-			}
-		} else {
-			return true, false
-		}
-	} else {
-		if oldTagged {
-			return true, false
-		}
-		if len(old.levels) > len(new.levels) {
-			return true, false
-		} else if len(new.levels) > len(old.levels) {
-			return false, true
-		} else {
-			return true, true
-		}
-	}
-}
-
-type structFieldEncoder struct {
-	field reflect2.StructField
-	fieldEncoder ValEncoder
-	omitempty bool
-}
-
-func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	encoder.fieldEncoder.Encode(fieldPtr, stream)
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
-	}
-}
-
-func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	return encoder.fieldEncoder.IsEmpty(fieldPtr)
-}
-
-func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
-	isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
-	if !converted {
-		return false
-	}
-	fieldPtr := encoder.field.UnsafeGet(ptr)
-	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type IsEmbeddedPtrNil interface {
-	IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
-}
-
-type structEncoder struct {
-	typ reflect2.Type
-	fields []structFieldTo
-}
-
-type structFieldTo struct {
-	encoder *structFieldEncoder
-	toName string
-}
-
-func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteObjectStart()
-	isNotFirst := false
-	for _, field := range encoder.fields {
-		if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
-			continue
-		}
-		if field.encoder.IsEmbeddedPtrNil(ptr) {
-			continue
-		}
-		if isNotFirst {
-			stream.WriteMore()
-		}
-		stream.WriteObjectField(field.toName)
-		field.encoder.Encode(ptr, stream)
-		isNotFirst = true
-	}
-	stream.WriteObjectEnd()
-	if stream.Error != nil && stream.Error != io.EOF {
-		stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
-	}
-}
-
-func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type emptyStructEncoder struct {
-}
-
-func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.WriteEmptyObject()
-}
-
-func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return false
-}
-
-type stringModeNumberEncoder struct {
-	elemEncoder ValEncoder
-}
-
-func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	stream.writeByte('"')
-	encoder.elemEncoder.Encode(ptr, stream)
-	stream.writeByte('"')
-}
-
-func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.elemEncoder.IsEmpty(ptr)
-}
-
-type stringModeStringEncoder struct {
-	elemEncoder ValEncoder
-	cfg *frozenConfig
-}
-
-func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
-	tempStream := encoder.cfg.BorrowStream(nil)
-	tempStream.Attachment = stream.Attachment
-	defer encoder.cfg.ReturnStream(tempStream)
-	encoder.elemEncoder.Encode(ptr, tempStream)
-	stream.WriteString(string(tempStream.Buffer()))
-}
-
-func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
-	return encoder.elemEncoder.IsEmpty(ptr)
-}
diff --git a/ui/wasm/vendor/github.com/json-iterator/go/stream.go b/ui/wasm/vendor/github.com/json-iterator/go/stream.go
deleted file mode 100644
index 23d8a3ad6b1..00000000000
--- a/ui/wasm/vendor/github.com/json-iterator/go/stream.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package jsoniter
-
-import (
-	"io"
-)
-
-// stream is a io.Writer like object, with JSON specific write functions.
-// Error is not returned as return value, but stored as Error member on this stream instance.
-type Stream struct {
-	cfg *frozenConfig
-	out io.Writer
-	buf []byte
-	Error error
-	indention int
-	Attachment interface{} // open for customized encoder
-}
-
-// NewStream create new stream instance.
-// cfg can be jsoniter.ConfigDefault.
-// out can be nil if write to internal buffer.
-// bufSize is the initial size for the internal buffer in bytes.
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
-	return &Stream{
-		cfg: cfg.(*frozenConfig),
-		out: out,
-		buf: make([]byte, 0, bufSize),
-		Error: nil,
-		indention: 0,
-	}
-}
-
-// Pool returns a pool can provide more stream with same configuration
-func (stream *Stream) Pool() StreamPool {
-	return stream.cfg
-}
-
-// Reset reuse this stream instance by assign a new writer
-func (stream *Stream) Reset(out io.Writer) {
-	stream.out = out
-	stream.buf = stream.buf[:0]
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (stream *Stream) Available() int {
-	return cap(stream.buf) - len(stream.buf)
-}
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (stream *Stream) Buffered() int {
-	return len(stream.buf)
-}
-
-// Buffer if writer is nil, use this method to take the result
-func (stream *Stream) Buffer() []byte {
-	return stream.buf
-}
-
-// SetBuffer allows to append to the internal buffer directly
-func (stream *Stream) SetBuffer(buf []byte) {
-	stream.buf = buf
-}
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (stream *Stream) Write(p []byte) (nn int, err error) {
-	stream.buf = append(stream.buf, p...)
-	if stream.out != nil {
-		nn, err = stream.out.Write(stream.buf)
-		stream.buf = stream.buf[nn:]
-		return
-	}
-	return len(p), nil
-}
-
-// WriteByte writes a single byte.
-func (stream *Stream) writeByte(c byte) {
-	stream.buf = append(stream.buf, c)
-}
-
-func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
-	stream.buf = append(stream.buf, c1, c2)
-}
-
-func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3)
-}
-
-func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3, c4)
-}
-
-func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
-	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (stream *Stream) Flush() error {
-	if stream.out == nil {
-		return nil
-	}
-	if stream.Error != nil {
-		return stream.Error
-	}
-	_, err := stream.out.Write(stream.buf)
-	if err != nil {
-		if stream.Error == nil {
-			stream.Error = err
-		}
-		return err
-	}
-	stream.buf = stream.buf[:0]
-	return nil
-}
-
-// WriteRaw write string out without quotes, just like []byte
-func (stream *Stream) WriteRaw(s string) {
-	stream.buf = append(stream.buf, s...)
-}
-
-// WriteNil write null to stream
-func (stream *Stream) WriteNil() {
-	stream.writeFourBytes('n', 'u', 'l', 'l')
-}
-
-// WriteTrue write true to stream
-func (stream *Stream) WriteTrue() {
-	stream.writeFourBytes('t', 'r', 'u', 'e')
-}
-
-// WriteFalse write false to stream
-func (stream *Stream) WriteFalse() {
-	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
-}
-
-// WriteBool write true or false into stream
-func (stream *Stream) WriteBool(val bool) {
-	if val {
-		stream.WriteTrue()
-	} else {
-		stream.WriteFalse()
-	}
-}
-
-// WriteObjectStart write { with possible indention
-func (stream *Stream) WriteObjectStart() {
-	stream.indention += stream.cfg.indentionStep
-	stream.writeByte('{')
-	stream.writeIndention(0)
-}
-
-// WriteObjectField write "field": with possible indention
-func (stream *Stream) WriteObjectField(field string) {
-	stream.WriteString(field)
-	if stream.indention > 0 {
-		stream.writeTwoBytes(':', ' ')
-	} else {
-		stream.writeByte(':')
-	}
-}
-
-// WriteObjectEnd write } with possible indention
-func (stream *Stream) WriteObjectEnd() {
-	stream.writeIndention(stream.cfg.indentionStep)
-	stream.indention -= stream.cfg.indentionStep
-	stream.writeByte('}')
-}
-
-// WriteEmptyObject write {}
-func (stream *Stream) WriteEmptyObject() {
-	stream.writeByte('{')
-	stream.writeByte('}')
-}
-
-// WriteMore write , with possible indention
-func (stream *Stream) WriteMore() {
-	stream.writeByte(',')
-	stream.writeIndention(0)
-}
-
-// WriteArrayStart write [ with possible indention
-func (stream *Stream) WriteArrayStart() {
-	stream.indention += stream.cfg.indentionStep
-	stream.writeByte('[')
-	stream.writeIndention(0)
-}
-
-// WriteEmptyArray write []
-func (stream *Stream) WriteEmptyArray() {
-	stream.writeTwoBytes('[', ']')
-}
-
-// WriteArrayEnd write ] with possible indention
-func (stream *Stream) WriteArrayEnd() {
-	stream.writeIndention(stream.cfg.indentionStep)
-	stream.indention -= stream.cfg.indentionStep
-	stream.writeByte(']')
-}
-
-func (stream *Stream) writeIndention(delta int) {
-	if stream.indention == 0 {
-		return
-	}
-	stream.writeByte('\n')
-	toWrite := stream.indention - delta
-	for i := 0; i < toWrite; i++ {
-		stream.buf = append(stream.buf, ' ')
-	}
-}
diff --git a/ui/wasm/vendor/github.com/json-iterator/go/stream_float.go b/ui/wasm/vendor/github.com/json-iterator/go/stream_float.go
deleted file mode 100644
index 826aa594ac6..00000000000
--- a/ui/wasm/vendor/github.com/json-iterator/go/stream_float.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package jsoniter
-
-import (
-	"fmt"
-	"math"
-	"strconv"
-)
-
-var pow10 []uint64
-
-func init() {
-	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
-}
-
-// WriteFloat32 write float32 to stream
-func (stream *Stream) WriteFloat32(val float32) {
-	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
-		stream.Error = fmt.Errorf("unsupported value: %f", val)
-		return
-	}
-	abs := math.Abs(float64(val))
-	fmt := byte('f')
-	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
-	if abs != 0 {
-		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
-			fmt = 'e'
-		}
-	}
-	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
-}
-
-// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
-func (stream *Stream) WriteFloat32Lossy(val float32) {
-	if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
-		stream.Error = fmt.Errorf("unsupported value: %f", val)
-		return
-	}
-	if val < 0 {
-		stream.writeByte('-')
-		val = -val
-	}
-	if val > 0x4ffffff {
-		stream.WriteFloat32(val)
-		return
-	}
-	precision := 6
-	exp := uint64(1000000) // 6
-	lval := uint64(float64(val)*float64(exp) + 0.5)
-	stream.WriteUint64(lval / exp)
-	fval := lval % exp
-	if fval == 0 {
-		return
-	}
-	stream.writeByte('.')
-	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
-		stream.writeByte('0')
-	}
-	stream.WriteUint64(fval)
-	for stream.buf[len(stream.buf)-1] == '0' {
-		stream.buf = stream.buf[:len(stream.buf)-1]
-	}
-}
-
-// WriteFloat64 write float64 to stream
-func (stream *Stream) WriteFloat64(val float64) {
-	if math.IsInf(val, 0) || math.IsNaN(val) {
-		stream.Error = fmt.Errorf("unsupported value: %f", val)
-		return
-	}
-	abs := math.Abs(val)
-	fmt := byte('f')
-	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
-	if abs != 0 {
-		if abs < 1e-6 || abs >= 1e21 {
-			fmt = 'e'
-		}
-	}
-	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
-}
-
-// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
-func (stream *Stream) WriteFloat64Lossy(val float64) {
-	if math.IsInf(val, 0) || math.IsNaN(val) {
-		stream.Error = fmt.Errorf("unsupported value: %f", val)
-		return
-	}
-	if val < 0 {
-		stream.writeByte('-')
-		val = -val
-	}
-	if val > 0x4ffffff {
-		stream.WriteFloat64(val)
-		return
-	}
-	precision := 6
-	exp := uint64(1000000) // 6
-	lval := uint64(val*float64(exp) + 0.5)
-	stream.WriteUint64(lval / exp)
-	fval := lval % exp
-	if fval == 0 {
-		return
-	}
-	stream.writeByte('.')
-	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
-		stream.writeByte('0')
-	}
-	stream.WriteUint64(fval)
-	for stream.buf[len(stream.buf)-1] == '0' {
-		stream.buf = stream.buf[:len(stream.buf)-1]
-	}
-}
diff --git a/ui/wasm/vendor/github.com/json-iterator/go/stream_int.go b/ui/wasm/vendor/github.com/json-iterator/go/stream_int.go
deleted file mode 100644
index d1059ee4c20..00000000000
--- a/ui/wasm/vendor/github.com/json-iterator/go/stream_int.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package jsoniter
-
-var digits []uint32
-
-func init() {
-	digits = make([]uint32, 1000)
-	for i := uint32(0); i < 1000; i++ {
-		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
-		if i < 10 {
-			digits[i] += 2 << 24
-		} else if i < 100 {
-			digits[i] += 1 << 24
-		}
-	}
-}
-
-func writeFirstBuf(space []byte, v uint32) []byte {
-	start := v >> 24
-	if start == 0 {
-		space = append(space, byte(v>>16), byte(v>>8))
-	} else if start == 1 {
-		space = append(space, byte(v>>8))
-	}
-	space = append(space, byte(v))
-	return space
-}
-
-func writeBuf(buf []byte, v uint32) []byte {
-	return append(buf, byte(v>>16), byte(v>>8), byte(v))
-}
-
-// WriteUint8 write uint8 to stream
-func (stream *Stream) WriteUint8(val uint8) {
-	stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteInt8 write int8 to stream
-func (stream *Stream) WriteInt8(nval int8) {
-	var val uint8
-	if nval < 0 {
-		val = uint8(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint8(nval)
-	}
-	stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteUint16 write uint16 to stream
-func (stream *Stream) WriteUint16(val uint16) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	stream.buf = writeFirstBuf(stream.buf, digits[q1])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-	return
-}
-
-// WriteInt16 write int16 to stream
-func (stream *Stream) WriteInt16(nval int16) {
-	var val uint16
-	if nval < 0 {
-		val = uint16(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint16(nval)
-	}
-	stream.WriteUint16(val)
-}
-
-// WriteUint32 write uint32 to stream
-func (stream *Stream) WriteUint32(val uint32) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	q2 := q1 / 1000
-	if q2 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q1])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r2 := q1 - q2*1000
-	q3 := q2 / 1000
-	if q3 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q2])
-	} else {
-		r3 := q2 - q3*1000
-		stream.buf = append(stream.buf, byte(q3+'0'))
-		stream.buf = writeBuf(stream.buf, digits[r3])
-	}
-	stream.buf = writeBuf(stream.buf, digits[r2])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt32 write int32 to stream
-func (stream *Stream) WriteInt32(nval int32) {
-	var val uint32
-	if nval < 0 {
-		val = uint32(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint32(nval)
-	}
-	stream.WriteUint32(val)
-}
-
-// WriteUint64 write uint64 to stream
-func (stream *Stream) WriteUint64(val uint64) {
-	q1 := val / 1000
-	if q1 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[val])
-		return
-	}
-	r1 := val - q1*1000
-	q2 := q1 / 1000
-	if q2 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q1])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r2 := q1 - q2*1000
-	q3 := q2 / 1000
-	if q3 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q2])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r3 := q2 - q3*1000
-	q4 := q3 / 1000
-	if q4 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q3])
-		stream.buf = writeBuf(stream.buf, digits[r3])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r4 := q3 - q4*1000
-	q5 := q4 / 1000
-	if q5 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q4])
-		stream.buf = writeBuf(stream.buf, digits[r4])
-		stream.buf = writeBuf(stream.buf, digits[r3])
-		stream.buf = writeBuf(stream.buf, digits[r2])
-		stream.buf = writeBuf(stream.buf, digits[r1])
-		return
-	}
-	r5 := q4 - q5*1000
-	q6 := q5 / 1000
-	if q6 == 0 {
-		stream.buf = writeFirstBuf(stream.buf, digits[q5])
-	} else {
-		stream.buf = writeFirstBuf(stream.buf, digits[q6])
-		r6 := q5 - q6*1000
-		stream.buf = writeBuf(stream.buf, digits[r6])
-	}
-	stream.buf = writeBuf(stream.buf, digits[r5])
-	stream.buf = writeBuf(stream.buf, digits[r4])
-	stream.buf = writeBuf(stream.buf, digits[r3])
-	stream.buf = writeBuf(stream.buf, digits[r2])
-	stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt64 write int64 to stream
-func (stream *Stream) WriteInt64(nval int64) {
-	var val uint64
-	if nval < 0 {
-		val = uint64(-nval)
-		stream.buf = append(stream.buf, '-')
-	} else {
-		val = uint64(nval)
-	}
-	stream.WriteUint64(val)
-}
-
-// WriteInt write int to stream
-func (stream *Stream) WriteInt(val int) {
-	stream.WriteInt64(int64(val))
-}
-
-// WriteUint write uint to stream
-func (stream *Stream) WriteUint(val uint) {
-	stream.WriteUint64(uint64(val))
-}
diff --git a/ui/wasm/vendor/github.com/json-iterator/go/stream_str.go b/ui/wasm/vendor/github.com/json-iterator/go/stream_str.go
deleted file mode 100644
index 54c2ba0b3a2..00000000000
--- a/ui/wasm/vendor/github.com/json-iterator/go/stream_str.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package jsoniter
-
-import (
-	"unicode/utf8"
-)
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML